Repository: unskript/Awesome-CloudOps-Automation
Branch: master
Commit: 740f230d7d9c
Files: 2805
Total size: 5.3 MB
Directory structure:
gitextract_0y86xh8m/
├── .github/
│ ├── Anatomy-of-Runbook.md
│ ├── CONTRIBUTING.md
│ ├── DEVELOPERGUIDE.md
│ ├── ISSUE_TEMPLATE/
│ │ ├── add_action.yml
│ │ ├── add_chatgpt_action.yml
│ │ ├── add_credential.yml
│ │ ├── add_runbook.yml
│ │ ├── bug_report.md
│ │ ├── config.yml
│ │ └── feature_request.md
│ ├── PULL_REQUEST_TEMPLATE/
│ │ ├── action_pr_template.yml
│ │ ├── feature_request_template.yml
│ │ ├── lego_pr_template.md
│ │ └── runbook_pr_template.md
│ ├── code-of-conduct.md
│ ├── dependabot.yml
│ ├── guidelines-to-creating-runbook.md
│ ├── hfest_2022_resource.md
│ ├── images/
│ │ ├── actionShield.json
│ │ └── runbookShield.json
│ ├── pull_request_template.md
│ └── workflows/
│ ├── all_module_test.yml
│ ├── build-and-release-docker-lite.yml
│ ├── build-and-release-docker.yml
│ ├── codeql.yml
│ ├── dependency-review.yml
│ ├── generate_readme.yaml
│ ├── lint-test.yaml
│ ├── make-release.yaml
│ ├── run-legoschema.yml
│ ├── sanitize-runbook.yml
│ └── scorecards.yml
├── .gitignore
├── .pylintrc
├── .vscode/
│ └── settings.json
├── AWS/
│ ├── .gitignore
│ ├── AWS_Access_Key_Rotation.ipynb
│ ├── AWS_Access_Key_Rotation.json
│ ├── AWS_Add_Lifecycle_Policy_To_S3_Buckets.ipynb
│ ├── AWS_Add_Lifecycle_Policy_To_S3_Buckets.json
│ ├── AWS_Add_Mandatory_tags_to_EC2.ipynb
│ ├── AWS_Add_Mandatory_tags_to_EC2.json
│ ├── AWS_Add_Tag_Across_Selected_AWS_Resources.ipynb
│ ├── AWS_Add_Tag_Across_Selected_AWS_Resources.json
│ ├── AWS_Bulk_Update_Resource_Tag.ipynb
│ ├── AWS_Bulk_Update_Resource_Tag.json
│ ├── AWS_Change_EBS_Volume_To_GP3_Type.ipynb
│ ├── AWS_Change_EBS_Volume_To_GP3_Type.json
│ ├── AWS_Change_Route53_TTL.ipynb
│ ├── AWS_Change_Route53_TTL.json
│ ├── AWS_Create_New_IAM_User_With_Policy.ipynb
│ ├── AWS_Create_New_IAM_User_With_Policy.json
│ ├── AWS_Delete_EBS_Volumes_Attached_To_Stopped_Instances.ipynb
│ ├── AWS_Delete_EBS_Volumes_Attached_To_Stopped_Instances.json
│ ├── AWS_Delete_EBS_Volumes_With_Low_Usage.ipynb
│ ├── AWS_Delete_EBS_Volumes_With_Low_Usage.json
│ ├── AWS_Delete_ECS_Clusters_with_Low_CPU_Utilization.ipynb
│ ├── AWS_Delete_ECS_Clusters_with_Low_CPU_Utilization.json
│ ├── AWS_Delete_ELBs_With_No_Targets_Or_Instances.ipynb
│ ├── AWS_Delete_ELBs_With_No_Targets_Or_Instances.json
│ ├── AWS_Delete_IAM_User.ipynb
│ ├── AWS_Delete_IAM_User.json
│ ├── AWS_Delete_Old_EBS_Snapshots.ipynb
│ ├── AWS_Delete_Old_EBS_Snapshots.json
│ ├── AWS_Delete_RDS_Instances_with_Low_CPU_Utilization.ipynb
│ ├── AWS_Delete_RDS_Instances_with_Low_CPU_Utilization.json
│ ├── AWS_Delete_Redshift_Clusters_with_Low_CPU_Utilization.ipynb
│ ├── AWS_Delete_Redshift_Clusters_with_Low_CPU_Utilization.json
│ ├── AWS_Delete_Unattached_EBS_Volume.ipynb
│ ├── AWS_Delete_Unattached_EBS_Volume.json
│ ├── AWS_Delete_Unused_AWS_Secrets.ipynb
│ ├── AWS_Delete_Unused_AWS_Secrets.json
│ ├── AWS_Delete_Unused_Log_Streams.ipynb
│ ├── AWS_Delete_Unused_Log_Streams.json
│ ├── AWS_Delete_Unused_NAT_Gateways.ipynb
│ ├── AWS_Delete_Unused_NAT_Gateways.json
│ ├── AWS_Delete_Unused_Route53_Healthchecks.ipynb
│ ├── AWS_Delete_Unused_Route53_Healthchecks.json
│ ├── AWS_Detach_ec2_Instance_from_ASG.ipynb
│ ├── AWS_Detach_ec2_Instance_from_ASG.json
│ ├── AWS_EC2_Disk_Cleanup.ipynb
│ ├── AWS_EC2_Disk_Cleanup.json
│ ├── AWS_Enforce_HTTP_Redirection_across_AWS_ALB.ipynb
│ ├── AWS_Enforce_HTTP_Redirection_across_AWS_ALB.json
│ ├── AWS_Ensure_Redshift_Clusters_have_Paused_Resume_Enabled.ipynb
│ ├── AWS_Ensure_Redshift_Clusters_have_Paused_Resume_Enabled.json
│ ├── AWS_Get_Elb_Unhealthy_Instances.ipynb
│ ├── AWS_Get_Elb_Unhealthy_Instances.json
│ ├── AWS_Get_Redshift_Daily_Product_Costs.ipynb
│ ├── AWS_Get_Redshift_Daily_Product_Costs.json
│ ├── AWS_Get_Redshift_EC2_Daily_Costs.ipynb
│ ├── AWS_Get_Redshift_EC2_Daily_Costs.json
│ ├── AWS_Lowering_AWS_CloudTrail_Costs_by_Removing_Redundant_Trails.ipynb
│ ├── AWS_Lowering_AWS_CloudTrail_Costs_by_Removing_Redundant_Trails.json
│ ├── AWS_Notify_About_Unused_Keypairs.ipynb
│ ├── AWS_Notify_About_Unused_Keypairs.json
│ ├── AWS_Purchase_Reserved_Cache_Nodes_For_Long_Running_ElastiCache_Clusters.ipynb
│ ├── AWS_Purchase_Reserved_Cache_Nodes_For_Long_Running_ElastiCache_Clusters.json
│ ├── AWS_Purchase_Reserved_Instances_For_Long_Running_RDS_Instances.ipynb
│ ├── AWS_Purchase_Reserved_Instances_For_Long_Running_RDS_Instances.json
│ ├── AWS_Purchase_Reserved_Nodes_For_Long_Running_Redshift_Clusters.ipynb
│ ├── AWS_Purchase_Reserved_Nodes_For_Long_Running_Redshift_Clusters.json
│ ├── AWS_Release_Unattached_Elastic_IPs.ipynb
│ ├── AWS_Release_Unattached_Elastic_IPs.json
│ ├── AWS_Remediate_unencrypted_S3_buckets.ipynb
│ ├── AWS_Remediate_unencrypted_S3_buckets.json
│ ├── AWS_Renew_SSL_Certificate.ipynb
│ ├── AWS_Renew_SSL_Certificate.json
│ ├── AWS_Restart_Unhealthy_Services_Target_Group.ipynb
│ ├── AWS_Restart_Unhealthy_Services_Target_Group.json
│ ├── AWS_Restrict_S3_Buckets_with_READ_WRITE_Permissions.ipynb
│ ├── AWS_Restrict_S3_Buckets_with_READ_WRITE_Permissions.json
│ ├── AWS_Secure_Publicly_Accessible_RDS_Instances.ipynb
│ ├── AWS_Secure_Publicly_Accessible_RDS_Instances.json
│ ├── AWS_Secure_Publicly_accessible_Amazon_RDS_Snapshot.ipynb
│ ├── AWS_Secure_Publicly_accessible_Amazon_RDS_Snapshot.json
│ ├── AWS_Stop_Idle_EC2_Instances.ipynb
│ ├── AWS_Stop_Idle_EC2_Instances.json
│ ├── AWS_Stop_Untagged_EC2_Instances.ipynb
│ ├── AWS_Stop_Untagged_EC2_Instances.json
│ ├── AWS_Terminate_EC2_Instances_Without_Valid_Lifetime_Tag.ipynb
│ ├── AWS_Terminate_EC2_Instances_Without_Valid_Lifetime_Tag.json
│ ├── AWS_Update_RDS_Instances_from_Old_to_New_Generation.ipynb
│ ├── AWS_Update_RDS_Instances_from_Old_to_New_Generation.json
│ ├── AWS_Update_Redshift_Database.ipynb
│ ├── AWS_Update_Redshift_Database.json
│ ├── AWS_Update_Resource_Tags.ipynb
│ ├── AWS_Update_Resource_Tags.json
│ ├── AWS_Update_Resources_About_To_Expire.ipynb
│ ├── AWS_Update_Resources_About_To_Expire.json
│ ├── AWS_encrypt_unencrypted_S3_buckets.ipynb
│ ├── AWS_encrypt_unencrypted_S3_buckets.json
│ ├── Add_new_IAM_user.ipynb
│ ├── Add_new_IAM_user.json
│ ├── Configure_url_endpoint_on_a_cloudwatch_alarm.ipynb
│ ├── Configure_url_endpoint_on_a_cloudwatch_alarm.json
│ ├── Copy_ami_to_all_given_AWS_regions.ipynb
│ ├── Copy_ami_to_all_given_AWS_regions.json
│ ├── Delete_Unused_AWS_NAT_Gateways.ipynb
│ ├── Delete_Unused_AWS_NAT_Gateways.json
│ ├── Detach_Instance_from_ASG.ipynb
│ ├── Detach_Instance_from_ASG.json
│ ├── Detect_ECS_failed_deployment.ipynb
│ ├── Detect_ECS_failed_deployment.json
│ ├── Enforce_Mandatory_Tags_Across_All_AWS_Resources.ipynb
│ ├── Enforce_Mandatory_Tags_Across_All_AWS_Resources.json
│ ├── Find_EC2_Instances_Scheduled_to_retire.ipynb
│ ├── Find_EC2_Instances_Scheduled_to_retire.json
│ ├── IAM_security_least_privilege.ipynb
│ ├── IAM_security_least_privilege.json
│ ├── Monitor_AWS_DynamoDB_provision_capacity.ipynb
│ ├── Monitor_AWS_DynamoDB_provision_capacity.json
│ ├── README.md
│ ├── Resize_EBS_Volume.ipynb
│ ├── Resize_EBS_Volume.json
│ ├── Resize_List_Of_Pvcs.ipynb
│ ├── Resize_List_Of_Pvcs.json
│ ├── Restart_AWS_EC2_Instances_By_Tag.ipynb
│ ├── Restart_AWS_EC2_Instances_By_Tag.json
│ ├── Run_EC2_from_AMI.ipynb
│ ├── Run_EC2_from_AMI.json
│ ├── Troubleshooting_Your_EC2_Configuration_in_Private_Subnet.ipynb
│ ├── Troubleshooting_Your_EC2_Configuration_in_Private_Subnet.json
│ ├── Update_and_Manage_AWS_User_Permission.ipynb
│ ├── Update_and_Manage_AWS_User_Permission.json
│ ├── __init__.py
│ └── legos/
│ ├── AWS_Start_IAM_Policy_Generation/
│ │ ├── AWS_Start_IAM_Policy_Generation.json
│ │ ├── AWS_Start_IAM_Policy_Generation.py
│ │ ├── README.md
│ │ └── __init__.py
│ ├── __init__.py
│ ├── aws_add_lifecycle_configuration_to_s3_bucket/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_add_lifecycle_configuration_to_s3_bucket.json
│ │ └── aws_add_lifecycle_configuration_to_s3_bucket.py
│ ├── aws_apply_default_encryption_for_s3_buckets/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_apply_default_encryption_for_s3_buckets.json
│ │ └── aws_apply_default_encryption_for_s3_buckets.py
│ ├── aws_attach_ebs_to_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_attach_ebs_to_instances.json
│ │ └── aws_attach_ebs_to_instances.py
│ ├── aws_attach_iam_policy/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_attach_iam_policy.json
│ │ └── aws_attach_iam_policy.py
│ ├── aws_attach_tags_to_resources/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_attach_tags_to_resources.json
│ │ └── aws_attach_tags_to_resources.py
│ ├── aws_change_acl_permissions_of_buckets/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_change_acl_permissions_of_buckets.json
│ │ └── aws_change_acl_permissions_of_buckets.py
│ ├── aws_check_rds_non_m5_t3_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_check_rds_non_m5_t3_instances.json
│ │ └── aws_check_rds_non_m5_t3_instances.py
│ ├── aws_check_ssl_certificate_expiry/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_check_ssl_certificate_expiry.json
│ │ └── aws_check_ssl_certificate_expiry.py
│ ├── aws_cloudwatch_attach_webhook_notification_to_alarm/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_cloudwatch_attach_webhook_notification_to_alarm.json
│ │ └── aws_cloudwatch_attach_webhook_notification_to_alarm.py
│ ├── aws_create_IAMpolicy/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_create_IAMpolicy.json
│ │ └── aws_create_IAMpolicy.py
│ ├── aws_create_access_key/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_create_access_key.json
│ │ └── aws_create_access_key.py
│ ├── aws_create_bucket/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_create_bucket.json
│ │ └── aws_create_bucket.py
│ ├── aws_create_iam_user/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_create_iam_user.json
│ │ └── aws_create_iam_user.py
│ ├── aws_create_redshift_query/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_create_redshift_query.json
│ │ └── aws_create_redshift_query.py
│ ├── aws_create_user_login_profile/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_create_user_login_profile.json
│ │ └── aws_create_user_login_profile.py
│ ├── aws_create_volumes_snapshot/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_create_volumes_snapshot.json
│ │ └── aws_create_volumes_snapshot.py
│ ├── aws_delete_access_key/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_access_key.json
│ │ └── aws_delete_access_key.py
│ ├── aws_delete_bucket/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_bucket.json
│ │ └── aws_delete_bucket.py
│ ├── aws_delete_classic_load_balancer/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_classic_load_balancer.json
│ │ └── aws_delete_classic_load_balancer.py
│ ├── aws_delete_ebs_snapshot/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_ebs_snapshot.json
│ │ └── aws_delete_ebs_snapshot.py
│ ├── aws_delete_ecs_cluster/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_ecs_cluster.json
│ │ └── aws_delete_ecs_cluster.py
│ ├── aws_delete_load_balancer/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_load_balancer.json
│ │ └── aws_delete_load_balancer.py
│ ├── aws_delete_log_stream/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_log_stream.json
│ │ └── aws_delete_log_stream.py
│ ├── aws_delete_nat_gateway/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_nat_gateway.json
│ │ └── aws_delete_nat_gateway.py
│ ├── aws_delete_rds_instance/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_rds_instance.json
│ │ └── aws_delete_rds_instance.py
│ ├── aws_delete_redshift_cluster/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_redshift_cluster.json
│ │ └── aws_delete_redshift_cluster.py
│ ├── aws_delete_route53_health_check/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_route53_health_check.json
│ │ └── aws_delete_route53_health_check.py
│ ├── aws_delete_s3_bucket_encryption/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_s3_bucket_encryption.json
│ │ └── aws_delete_s3_bucket_encryption.py
│ ├── aws_delete_secret/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_secret.json
│ │ └── aws_delete_secret.py
│ ├── aws_delete_volume_by_id/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_delete_volume_by_id.json
│ │ └── aws_delete_volume_by_id.py
│ ├── aws_deregister_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_deregister_instances.json
│ │ └── aws_deregister_instances.py
│ ├── aws_describe_cloudtrail/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_describe_cloudtrail.json
│ │ └── aws_describe_cloudtrail.py
│ ├── aws_detach_ebs_to_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_detach_ebs_to_instances.json
│ │ └── aws_detach_ebs_to_instances.py
│ ├── aws_detach_instances_from_autoscaling_group/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_detach_instances_from_autoscaling_group.json
│ │ └── aws_detach_instances_from_autoscaling_group.py
│ ├── aws_ebs_modify_volume/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_ebs_modify_volume.json
│ │ └── aws_ebs_modify_volume.py
│ ├── aws_ecs_describe_task_definition/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_ecs_describe_task_definition.json
│ │ └── aws_ecs_describe_task_definition.py
│ ├── aws_ecs_detect_failed_deployment/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_ecs_detect_failed_deployment.json
│ │ └── aws_ecs_detect_failed_deployment.py
│ ├── aws_ecs_service_restart/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_ecs_service_restart.json
│ │ └── aws_ecs_service_restart.py
│ ├── aws_ecs_update_service/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_ecs_update_service.json
│ │ └── aws_ecs_update_service.py
│ ├── aws_eks_copy_pod_logs_to_bucket/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_copy_pod_logs_to_bucket.json
│ │ └── aws_eks_copy_pod_logs_to_bucket.py
│ ├── aws_eks_delete_pod/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_delete_pod.json
│ │ └── aws_eks_delete_pod.py
│ ├── aws_eks_get_all_dead_pods/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_get_all_dead_pods.json
│ │ └── aws_eks_get_all_dead_pods.py
│ ├── aws_eks_get_all_namespaces/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_get_all_namespaces.json
│ │ └── aws_eks_get_all_namespaces.py
│ ├── aws_eks_get_all_pods/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_get_all_pods.json
│ │ └── aws_eks_get_all_pods.py
│ ├── aws_eks_get_deployments_name/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_get_deployments_name.json
│ │ └── aws_eks_get_deployments_name.py
│ ├── aws_eks_get_node_cpu_memory/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_get_node_cpu_memory.json
│ │ └── aws_eks_get_node_cpu_memory.py
│ ├── aws_eks_get_nodes/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_get_nodes.json
│ │ └── aws_eks_get_nodes.py
│ ├── aws_eks_get_not_running_pods/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_get_not_running_pods.json
│ │ └── aws_eks_get_not_running_pods.py
│ ├── aws_eks_get_pod_cpu_memory/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_get_pod_cpu_memory.json
│ │ └── aws_eks_get_pod_cpu_memory.py
│ ├── aws_eks_get_pod_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_get_pod_status.json
│ │ └── aws_eks_get_pod_status.py
│ ├── aws_eks_get_running_pods/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_get_running_pods.json
│ │ └── aws_eks_get_running_pods.py
│ ├── aws_eks_run_kubectl_cmd/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_eks_run_kubectl_cmd.json
│ │ └── aws_eks_run_kubectl_cmd.py
│ ├── aws_emr_get_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_emr_get_instances.json
│ │ └── aws_emr_get_instances.py
│ ├── aws_execute_cli_command/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_execute_cli_command.json
│ │ └── aws_execute_cli_command.py
│ ├── aws_execute_command_ssm/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_execute_command_ssm.json
│ │ └── aws_execute_command_ssm.py
│ ├── aws_filter_all_manual_database_snapshots/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_all_manual_database_snapshots.json
│ │ └── aws_filter_all_manual_database_snapshots.py
│ ├── aws_filter_ebs_unattached_volumes/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_ebs_unattached_volumes.json
│ │ └── aws_filter_ebs_unattached_volumes.py
│ ├── aws_filter_ebs_volumes_with_low_iops/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_ebs_volumes_with_low_iops.json
│ │ └── aws_filter_ebs_volumes_with_low_iops.py
│ ├── aws_filter_ec2_by_tags/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_ec2_by_tags.json
│ │ └── aws_filter_ec2_by_tags.py
│ ├── aws_filter_ec2_by_vpc/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_ec2_by_vpc.json
│ │ └── aws_filter_ec2_by_vpc.py
│ ├── aws_filter_ec2_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_ec2_instances.json
│ │ └── aws_filter_ec2_instances.py
│ ├── aws_filter_ec2_without_lifetime_tag/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_ec2_without_lifetime_tag.json
│ │ └── aws_filter_ec2_without_lifetime_tag.py
│ ├── aws_filter_instances_without_termination_and_lifetime_tag/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_instances_without_termination_and_lifetime_tag.json
│ │ └── aws_filter_instances_without_termination_and_lifetime_tag.py
│ ├── aws_filter_large_ec2_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_large_ec2_instances.json
│ │ └── aws_filter_large_ec2_instances.py
│ ├── aws_filter_long_running_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_long_running_instances.json
│ │ └── aws_filter_long_running_instances.py
│ ├── aws_filter_old_ebs_snapshots/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_old_ebs_snapshots.json
│ │ └── aws_filter_old_ebs_snapshots.py
│ ├── aws_filter_public_s3_buckets_by_acl/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_public_s3_buckets_by_acl.json
│ │ └── aws_filter_public_s3_buckets_by_acl.py
│ ├── aws_filter_target_groups_by_tags/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_target_groups_by_tags.json
│ │ └── aws_filter_target_groups_by_tags.py
│ ├── aws_filter_unencrypted_s3_buckets/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_unencrypted_s3_buckets.json
│ │ └── aws_filter_unencrypted_s3_buckets.py
│ ├── aws_filter_unhealthy_instances_from_asg/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_unhealthy_instances_from_asg.json
│ │ └── aws_filter_unhealthy_instances_from_asg.py
│ ├── aws_filter_untagged_ec2_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_untagged_ec2_instances.json
│ │ └── aws_filter_untagged_ec2_instances.py
│ ├── aws_filter_unused_keypairs/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_unused_keypairs.json
│ │ └── aws_filter_unused_keypairs.py
│ ├── aws_filter_unused_log_streams/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_unused_log_streams.json
│ │ └── aws_filter_unused_log_streams.py
│ ├── aws_filter_unused_nat_gateway/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_filter_unused_nat_gateway.json
│ │ └── aws_filter_unused_nat_gateway.py
│ ├── aws_find_elbs_with_no_targets_or_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_find_elbs_with_no_targets_or_instances.json
│ │ └── aws_find_elbs_with_no_targets_or_instances.py
│ ├── aws_find_idle_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_find_idle_instances.json
│ │ └── aws_find_idle_instances.py
│ ├── aws_find_long_running_lambdas/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_find_long_running_lambdas.json
│ │ └── aws_find_long_running_lambdas.py
│ ├── aws_find_low_connection_rds_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_find_low_connection_rds_instances.json
│ │ └── aws_find_low_connection_rds_instances.py
│ ├── aws_find_old_gen_emr_clusters/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_find_old_gen_emr_clusters.json
│ │ └── aws_find_old_gen_emr_clusters.py
│ ├── aws_find_rds_instances_with_low_cpu_utilization/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_find_rds_instances_with_low_cpu_utilization.json
│ │ └── aws_find_rds_instances_with_low_cpu_utilization.py
│ ├── aws_find_redshift_cluster_without_pause_resume_enabled/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_find_redshift_cluster_without_pause_resume_enabled.json
│ │ └── aws_find_redshift_cluster_without_pause_resume_enabled.py
│ ├── aws_find_redshift_clusters_with_low_cpu_utilization/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_find_redshift_clusters_with_low_cpu_utilization.json
│ │ └── aws_find_redshift_clusters_with_low_cpu_utilization.py
│ ├── aws_find_s3_buckets_without_lifecycle_policies/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_find_s3_buckets_without_lifecycle_policies.json
│ │ └── aws_find_s3_buckets_without_lifecycle_policies.py
│ ├── aws_finding_redundant_trails/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_finding_redundant_trails.json
│ │ └── aws_finding_redundant_trails.py
│ ├── aws_get_acount_number/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_acount_number.json
│ │ └── aws_get_acount_number.py
│ ├── aws_get_alarms_list/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_alarms_list.json
│ │ └── aws_get_alarms_list.py
│ ├── aws_get_alb_listeners_without_http_redirect/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_alb_listeners_without_http_redirect.json
│ │ └── aws_get_alb_listeners_without_http_redirect.py
│ ├── aws_get_all_ec2_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_all_ec2_instances.json
│ │ └── aws_get_all_ec2_instances.py
│ ├── aws_get_all_load_balancers/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_all_load_balancers.json
│ │ └── aws_get_all_load_balancers.py
│ ├── aws_get_all_service_names/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_all_service_names.json
│ │ └── aws_get_all_service_names.py
│ ├── aws_get_all_untagged_resources/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_all_untagged_resources.json
│ │ └── aws_get_all_untagged_resources.py
│ ├── aws_get_auto_scaling_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_auto_scaling_instances.json
│ │ └── aws_get_auto_scaling_instances.py
│ ├── aws_get_bucket_size/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_bucket_size.json
│ │ └── aws_get_bucket_size.py
│ ├── aws_get_cloudwatch_ebs/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_ebs.json
│ │ └── aws_get_cloudwatch_ebs.py
│ ├── aws_get_cloudwatch_ec2/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_ec2.json
│ │ └── aws_get_cloudwatch_ec2.py
│ ├── aws_get_cloudwatch_ec2_cpuutil/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_ec2_cpuutil.json
│ │ └── aws_get_cloudwatch_ec2_cpuutil.py
│ ├── aws_get_cloudwatch_metrics_applicationelb/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_metrics_applicationelb.json
│ │ └── aws_get_cloudwatch_metrics_applicationelb.py
│ ├── aws_get_cloudwatch_metrics_classic_elb/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_metrics_classic_elb.json
│ │ └── aws_get_cloudwatch_metrics_classic_elb.py
│ ├── aws_get_cloudwatch_metrics_dynamodb/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_metrics_dynamodb.json
│ │ └── aws_get_cloudwatch_metrics_dynamodb.py
│ ├── aws_get_cloudwatch_metrics_ec2autoscaling/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_metrics_ec2autoscaling.json
│ │ └── aws_get_cloudwatch_metrics_ec2autoscaling.py
│ ├── aws_get_cloudwatch_metrics_gatewayelb/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_metrics_gatewayelb.json
│ │ └── aws_get_cloudwatch_metrics_gatewayelb.py
│ ├── aws_get_cloudwatch_metrics_lambda/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_metrics_lambda.json
│ │ └── aws_get_cloudwatch_metrics_lambda.py
│ ├── aws_get_cloudwatch_metrics_network_elb/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_metrics_network_elb.json
│ │ └── aws_get_cloudwatch_metrics_network_elb.py
│ ├── aws_get_cloudwatch_metrics_rds/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_metrics_rds.json
│ │ └── aws_get_cloudwatch_metrics_rds.py
│ ├── aws_get_cloudwatch_metrics_redshift/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_metrics_redshift.json
│ │ └── aws_get_cloudwatch_metrics_redshift.py
│ ├── aws_get_cloudwatch_metrics_sqs/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_metrics_sqs.json
│ │ └── aws_get_cloudwatch_metrics_sqs.py
│ ├── aws_get_cloudwatch_statistics/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cloudwatch_statistics.json
│ │ └── aws_get_cloudwatch_statistics.py
│ ├── aws_get_cost_for_all_services/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cost_for_all_services.json
│ │ └── aws_get_cost_for_all_services.py
│ ├── aws_get_cost_for_data_transfer/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_cost_for_data_transfer.json
│ │ └── aws_get_cost_for_data_transfer.py
│ ├── aws_get_daily_total_spend/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_daily_total_spend.json
│ │ └── aws_get_daily_total_spend.py
│ ├── aws_get_ebs_volume_for_low_usage/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_ebs_volume_for_low_usage.json
│ │ └── aws_get_ebs_volume_for_low_usage.py
│ ├── aws_get_ebs_volumes_by_type/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_ebs_volumes_by_type.json
│ │ └── aws_get_ebs_volumes_by_type.py
│ ├── aws_get_ebs_volumes_without_gp3_type/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_ebs_volumes_without_gp3_type.json
│ │ └── aws_get_ebs_volumes_without_gp3_type.py
│ ├── aws_get_ec2_cpu_consumption/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_ec2_cpu_consumption.json
│ │ └── aws_get_ec2_cpu_consumption.py
│ ├── aws_get_ec2_data_traffic/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_ec2_data_traffic.json
│ │ └── aws_get_ec2_data_traffic.py
│ ├── aws_get_ec2_instance_age/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_ec2_instance_age.json
│ │ └── aws_get_ec2_instance_age.py
│ ├── aws_get_ec2_instances_with_smaller_cpu_size/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_ec2_instances_with_smaller_cpu_size.json
│ │ └── aws_get_ec2_instances_with_smaller_cpu_size.py
│ ├── aws_get_ecs_instances_without_autoscaling/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_ecs_instances_without_autoscaling.json
│ │ └── aws_get_ecs_instances_without_autoscaling.py
│ ├── aws_get_ecs_services_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_ecs_services_status.json
│ │ └── aws_get_ecs_services_status.py
│ ├── aws_get_ecs_services_without_autoscaling/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_ecs_services_without_autoscaling.json
│ │ └── aws_get_ecs_services_without_autoscaling.py
│ ├── aws_get_generated_policy/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_generated_policy.json
│ │ └── aws_get_generated_policy.py
│ ├── aws_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_handle.json
│ │ └── aws_get_handle.py
│ ├── aws_get_iam_users_without_attached_policies/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_iam_users_without_attached_policies.json
│ │ └── aws_get_iam_users_without_attached_policies.py
│ ├── aws_get_idle_emr_clusters/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_idle_emr_clusters.json
│ │ └── aws_get_idle_emr_clusters.py
│ ├── aws_get_instance_detail_with_private_dns_name/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_instance_detail_with_private_dns_name.json
│ │ └── aws_get_instance_detail_with_private_dns_name.py
│ ├── aws_get_instance_details/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_instance_details.json
│ │ └── aws_get_instance_details.py
│ ├── aws_get_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_instances.json
│ │ └── aws_get_instances.py
│ ├── aws_get_internet_gateway_by_vpc/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_internet_gateway_by_vpc.json
│ │ └── aws_get_internet_gateway_by_vpc.py
│ ├── aws_get_lambdas_not_using_arm_graviton2_processor/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_lambdas_not_using_arm_graviton2_processor.json
│ │ └── aws_get_lambdas_not_using_arm_graviton2_processor.py
│ ├── aws_get_lambdas_with_high_error_rate/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_lambdas_with_high_error_rate.json
│ │ └── aws_get_lambdas_with_high_error_rate.py
│ ├── aws_get_long_running_elasticcache_clusters_without_reserved_nodes/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_long_running_elasticcache_clusters_without_reserved_nodes.json
│ │ └── aws_get_long_running_elasticcache_clusters_without_reserved_nodes.py
│ ├── aws_get_long_running_rds_instances_without_reserved_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_long_running_rds_instances_without_reserved_instances.json
│ │ └── aws_get_long_running_rds_instances_without_reserved_instances.py
│ ├── aws_get_long_running_redshift_clusters_without_reserved_nodes/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_long_running_redshift_clusters_without_reserved_nodes.json
│ │ └── aws_get_long_running_redshift_clusters_without_reserved_nodes.py
│ ├── aws_get_nat_gateway_by_vpc/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_nat_gateway_by_vpc.json
│ │ └── aws_get_nat_gateway_by_vpc.py
│ ├── aws_get_nlb_targets/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_nlb_targets.json
│ │ └── aws_get_nlb_targets.py
│ ├── aws_get_nlbs_without_targets/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_nlbs_without_targets.json
│ │ └── aws_get_nlbs_without_targets.py
│ ├── aws_get_older_generation_rds_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_older_generation_rds_instances.json
│ │ └── aws_get_older_generation_rds_instances.py
│ ├── aws_get_private_address_from_nat_gateways/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_private_address_from_nat_gateways.json
│ │ └── aws_get_private_address_from_nat_gateways.py
│ ├── aws_get_public_ec2_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_public_ec2_instances.json
│ │ └── aws_get_public_ec2_instances.py
│ ├── aws_get_publicly_accessible_db_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_publicly_accessible_db_instances.json
│ │ └── aws_get_publicly_accessible_db_instances.py
│ ├── aws_get_publicly_accessible_db_snapshots/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_publicly_accessible_db_snapshots.json
│ │ └── aws_get_publicly_accessible_db_snapshots.py
│ ├── aws_get_rds_automated_snapshots_above_retention_period/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_rds_automated_snapshots_above_retention_period.json
│ │ └── aws_get_rds_automated_snapshots_above_retention_period.py
│ ├── aws_get_redshift_query_details/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_redshift_query_details.json
│ │ └── aws_get_redshift_query_details.py
│ ├── aws_get_redshift_result/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_redshift_result.json
│ │ └── aws_get_redshift_result.py
│ ├── aws_get_reserved_instances_about_to_retired/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_reserved_instances_about_to_retired.json
│ │ └── aws_get_reserved_instances_about_to_retired.py
│ ├── aws_get_resources_missing_tag/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_resources_missing_tag.json
│ │ └── aws_get_resources_missing_tag.py
│ ├── aws_get_resources_with_expiration_tag/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_resources_with_expiration_tag.json
│ │ └── aws_get_resources_with_expiration_tag.py
│ ├── aws_get_resources_with_tag/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_resources_with_tag.json
│ │ └── aws_get_resources_with_tag.py
│ ├── aws_get_s3_buckets/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_s3_buckets.json
│ │ └── aws_get_s3_buckets.py
│ ├── aws_get_schedule_to_retire_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_schedule_to_retire_instances.json
│ │ └── aws_get_schedule_to_retire_instances.py
│ ├── aws_get_secret_from_secretmanager/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_secret_from_secretmanager.json
│ │ └── aws_get_secret_from_secretmanager.py
│ ├── aws_get_secrets_manager_secret/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_secrets_manager_secret.json
│ │ └── aws_get_secrets_manager_secret.py
│ ├── aws_get_secrets_manager_secretARN/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_secrets_manager_secretARN.json
│ │ └── aws_get_secrets_manager_secretARN.py
│ ├── aws_get_security_group_details/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_security_group_details.json
│ │ └── aws_get_security_group_details.py
│ ├── aws_get_service_quota_details/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_service_quota_details.json
│ │ └── aws_get_service_quota_details.py
│ ├── aws_get_service_quotas/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_service_quotas.json
│ │ └── aws_get_service_quotas.py
│ ├── aws_get_stopped_instance_volumes/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_stopped_instance_volumes.json
│ │ └── aws_get_stopped_instance_volumes.py
│ ├── aws_get_sts_caller_identity/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_sts_caller_identity.json
│ │ └── aws_get_sts_caller_identity.py
│ ├── aws_get_tags_of_all_resources/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_tags_of_all_resources.json
│ │ └── aws_get_tags_of_all_resources.py
│ ├── aws_get_timed_out_lambdas/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_timed_out_lambdas.json
│ │ └── aws_get_timed_out_lambdas.py
│ ├── aws_get_ttl_for_route53_records/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_ttl_for_route53_records.json
│ │ └── aws_get_ttl_for_route53_records.py
│ ├── aws_get_ttl_under_given_hours/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_ttl_under_given_hours.json
│ │ └── aws_get_ttl_under_given_hours.py
│ ├── aws_get_unhealthy_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_unhealthy_instances.json
│ │ └── aws_get_unhealthy_instances.py
│ ├── aws_get_unhealthy_instances_from_elb/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_unhealthy_instances_from_elb.json
│ │ └── aws_get_unhealthy_instances_from_elb.py
│ ├── aws_get_unused_route53_health_checks/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_unused_route53_health_checks.json
│ │ └── aws_get_unused_route53_health_checks.py
│ ├── aws_get_users_with_old_access_keys/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_get_users_with_old_access_keys.json
│ │ └── aws_get_users_with_old_access_keys.py
│ ├── aws_launch_instance_from_ami/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_launch_instance_from_ami.json
│ │ └── aws_launch_instance_from_ami.py
│ ├── aws_list_access_keys/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_access_keys.json
│ │ └── aws_list_access_keys.py
│ ├── aws_list_all_iam_users/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_all_iam_users.json
│ │ └── aws_list_all_iam_users.py
│ ├── aws_list_all_regions/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_all_regions.json
│ │ └── aws_list_all_regions.py
│ ├── aws_list_application_loadbalancers/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_application_loadbalancers.json
│ │ └── aws_list_application_loadbalancers.py
│ ├── aws_list_attached_user_policies/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_attached_user_policies.json
│ │ └── aws_list_attached_user_policies.py
│ ├── aws_list_clusters_with_low_utilization/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_clusters_with_low_utilization.json
│ │ └── aws_list_clusters_with_low_utilization.py
│ ├── aws_list_expiring_access_keys/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_expiring_access_keys.json
│ │ └── aws_list_expiring_access_keys.py
│ ├── aws_list_expiring_acm_certificates/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_expiring_acm_certificates.json
│ │ └── aws_list_expiring_acm_certificates.py
│ ├── aws_list_hosted_zones/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_hosted_zones.json
│ │ └── aws_list_hosted_zones.py
│ ├── aws_list_unattached_elastic_ips/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_unattached_elastic_ips.json
│ │ └── aws_list_unattached_elastic_ips.py
│ ├── aws_list_unhealthy_instances_in_target_group/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_unhealthy_instances_in_target_group.json
│ │ └── aws_list_unhealthy_instances_in_target_group.py
│ ├── aws_list_unused_secrets/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_unused_secrets.json
│ │ └── aws_list_unused_secrets.py
│ ├── aws_list_users_with_old_passwords/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_list_users_with_old_passwords.json
│ │ └── aws_list_users_with_old_passwords.py
│ ├── aws_loadbalancer_list_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_loadbalancer_list_instances.json
│ │ └── aws_loadbalancer_list_instances.py
│ ├── aws_make_bucket_public/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_make_bucket_public.json
│ │ └── aws_make_bucket_public.py
│ ├── aws_make_rds_instance_not_publicly_accessible/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_make_rds_instance_not_publicly_accessible.json
│ │ └── aws_make_rds_instance_not_publicly_accessible.py
│ ├── aws_modify_ebs_volume_to_gp3/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_modify_ebs_volume_to_gp3.json
│ │ └── aws_modify_ebs_volume_to_gp3.py
│ ├── aws_modify_listener_for_http_redirection/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_modify_listener_for_http_redirection.json
│ │ └── aws_modify_listener_for_http_redirection.py
│ ├── aws_modify_public_db_snapshots/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_modify_public_db_snapshots.json
│ │ └── aws_modify_public_db_snapshots.py
│ ├── aws_postgresql_get_configured_max_connections/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_postgresql_get_configured_max_connections.json
│ │ └── aws_postgresql_get_configured_max_connections.py
│ ├── aws_postgresql_plot_active_connections/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_postgresql_plot_active_connections.json
│ │ └── aws_postgresql_plot_active_connections.py
│ ├── aws_purchase_elasticcache_reserved_node/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_purchase_elasticcache_reserved_node.json
│ │ └── aws_purchase_elasticcache_reserved_node.py
│ ├── aws_purchase_rds_reserved_instance/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_purchase_rds_reserved_instance.json
│ │ └── aws_purchase_rds_reserved_instance.py
│ ├── aws_purchase_redshift_reserved_node/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_purchase_redshift_reserved_node.json
│ │ └── aws_purchase_redshift_reserved_node.py
│ ├── aws_put_bucket_cors/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_put_bucket_cors.json
│ │ └── aws_put_bucket_cors.py
│ ├── aws_put_bucket_policy/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_put_bucket_policy.json
│ │ └── aws_put_bucket_policy.py
│ ├── aws_read_object/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_read_object.json
│ │ └── aws_read_object.py
│ ├── aws_register_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_register_instances.json
│ │ └── aws_register_instances.py
│ ├── aws_release_elastic_ip/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_release_elastic_ip.json
│ │ └── aws_release_elastic_ip.py
│ ├── aws_renew_expiring_acm_certificates/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_renew_expiring_acm_certificates.json
│ │ └── aws_renew_expiring_acm_certificates.py
│ ├── aws_request_service_quota_increase/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_request_service_quota_increase.json
│ │ └── aws_request_service_quota_increase.py
│ ├── aws_restart_ec2_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_restart_ec2_instances.json
│ │ └── aws_restart_ec2_instances.py
│ ├── aws_revoke_policy_from_iam_user/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_revoke_policy_from_iam_user.json
│ │ └── aws_revoke_policy_from_iam_user.py
│ ├── aws_run_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_run_instances.json
│ │ └── aws_run_instances.py
│ ├── aws_schedule_pause_resume_enabled/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_schedule_pause_resume_enabled.json
│ │ └── aws_schedule_pause_resume_enabled.py
│ ├── aws_send_email/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_send_email.json
│ │ └── aws_send_email.py
│ ├── aws_service_quota_limits/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_service_quota_limits.json
│ │ └── aws_service_quota_limits.py
│ ├── aws_service_quota_limits_vpc/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_service_quota_limits_vpc.json
│ │ └── aws_service_quota_limits_vpc.py
│ ├── aws_stop_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_stop_instances.json
│ │ └── aws_stop_instances.py
│ ├── aws_tag_ec2_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_tag_ec2_instances.json
│ │ └── aws_tag_ec2_instances.py
│ ├── aws_target_group_list_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_target_group_list_instances.json
│ │ └── aws_target_group_list_instances.py
│ ├── aws_target_group_list_unhealthy_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_target_group_list_unhealthy_instances.json
│ │ └── aws_target_group_list_unhealthy_instances.py
│ ├── aws_target_group_register_unregister_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_target_group_register_unregister_instances.json
│ │ └── aws_target_group_register_unregister_instances.py
│ ├── aws_terminate_ec2_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_terminate_ec2_instances.json
│ │ └── aws_terminate_ec2_instances.py
│ ├── aws_update_access_key/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_update_access_key.json
│ │ └── aws_update_access_key.py
│ ├── aws_update_ttl_for_route53_records/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_update_ttl_for_route53_records.json
│ │ └── aws_update_ttl_for_route53_records.py
│ ├── aws_upload_file_to_s3/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── aws_upload_file_to_s3.json
│ │ └── aws_upload_file_to_s3.py
│ └── aws_vpc_service_quota_warning/
│ ├── README.md
│ ├── __init__.py
│ ├── aws_vpc_service_quota_warning.json
│ └── aws_vpc_service_quota_warning.py
├── Airflow/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── airflow_check_dag_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── airflow_check_dag_status.json
│ │ └── airflow_check_dag_status.py
│ ├── airflow_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── airflow_get_handle.json
│ │ └── airflow_get_handle.py
│ ├── airflow_list_DAG_runs/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── airflow_list_DAG_runs.json
│ │ └── airflow_list_DAG_runs.py
│ └── airflow_trigger_dag_run/
│ ├── README.md
│ ├── __init__.py
│ ├── airflow_trigger_dag_run.json
│ └── airflow_trigger_dag_run.py
├── Azure/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ └── azure_get_handle/
│ ├── README.md
│ ├── __init__.py
│ ├── azure_get_handle.json
│ └── azure_get_handle.py
├── Datadog/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── datadog_delete_incident/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_delete_incident.json
│ │ └── datadog_delete_incident.py
│ ├── datadog_get_event/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_get_event.json
│ │ └── datadog_get_event.py
│ ├── datadog_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_get_handle.json
│ │ └── datadog_get_handle.py
│ ├── datadog_get_incident/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_get_incident.json
│ │ └── datadog_get_incident.py
│ ├── datadog_get_metric_metadata/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_get_metric_metadata.json
│ │ └── datadog_get_metric_metadata.py
│ ├── datadog_get_monitor/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_get_monitor.json
│ │ └── datadog_get_monitor.py
│ ├── datadog_get_monitorid/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_get_monitorid.json
│ │ └── datadog_get_monitorid.py
│ ├── datadog_list_active_metrics/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_list_active_metrics.json
│ │ └── datadog_list_active_metrics.py
│ ├── datadog_list_all_monitors/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_list_all_monitors.json
│ │ └── datadog_list_all_monitors.py
│ ├── datadog_list_metrics/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_list_metrics.json
│ │ └── datadog_list_metrics.py
│ ├── datadog_mute_or_unmute_alerts/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_mute_or_unmute_alerts.json
│ │ └── datadog_mute_or_unmute_alerts.py
│ ├── datadog_query_metrics/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_query_metrics.json
│ │ └── datadog_query_metrics.py
│ ├── datadog_schedule_downtime/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── datadog_schedule_downtime.json
│ │ └── datadog_schedule_downtime.py
│ └── datadog_search_monitors/
│ ├── README.md
│ ├── __init__.py
│ ├── datadog_search_monitors.json
│ └── datadog_search_monitors.py
├── Docs/
│ └── README.md
├── ElasticSearch/
│ ├── .gitignore
│ ├── Elasticsearch_Rolling_Restart.ipynb
│ ├── Elasticsearch_Rolling_Restart.json
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── elasticsearch_check_health_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── elasticsearch_check_health_status.json
│ │ └── elasticsearch_check_health_status.py
│ ├── elasticsearch_check_large_index_size/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── elasticsearch_check_large_index_size.json
│ │ └── elasticsearch_check_large_index_size.py
│ ├── elasticsearch_compare_cluster_disk_size_to_threshold/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── elasticsearch_compare_cluster_disk_size_to_threshold.json
│ │ └── elasticsearch_compare_cluster_disk_size_to_threshold.py
│ ├── elasticsearch_delete_unassigned_shards/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── elasticsearch_delete_unassigned_shards.json
│ │ └── elasticsearch_delete_unassigned_shards.py
│ ├── elasticsearch_disable_shard_allocation/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── elasticsearch_disable_shard_allocation.json
│ │ └── elasticsearch_disable_shard_allocation.py
│ ├── elasticsearch_enable_shard_allocation/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── elasticsearch_enable_shard_allocation.json
│ │ └── elasticsearch_enable_shard_allocation.py
│ ├── elasticsearch_get_cluster_statistics/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── elasticsearch_get_cluster_statistics.json
│ │ └── elasticsearch_get_cluster_statistics.py
│ ├── elasticsearch_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── elasticsearch_get_handle.json
│ │ └── elasticsearch_get_handle.py
│ ├── elasticsearch_get_index_health/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── elasticsearch_get_index_health.json
│ │ └── elasticsearch_get_index_health.py
│ ├── elasticsearch_list_allocations/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── elasticsearch_list_allocations.json
│ │ └── elasticsearch_list_allocations.py
│ ├── elasticsearch_list_nodes/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── elasticsearch_list_nodes.json
│ │ └── elasticsearch_list_nodes.py
│ └── elasticsearch_search_query/
│ ├── README.md
│ ├── __init__.py
│ ├── elasticsearch_search_query.json
│ └── elasticsearch_search_query.py
├── GCP/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── gcp_add_lifecycle_policy_to_bucket/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_add_lifecycle_policy_to_bucket.json
│ │ └── gcp_add_lifecycle_policy_to_bucket.py
│ ├── gcp_add_member_to_iam_role/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_add_member_to_iam_role.json
│ │ └── gcp_add_member_to_iam_role.py
│ ├── gcp_add_role_to_service_account/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_add_role_to_service_account.json
│ │ └── gcp_add_role_to_service_account.py
│ ├── gcp_create_bucket/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_create_bucket.json
│ │ └── gcp_create_bucket.py
│ ├── gcp_create_disk_snapshot/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_create_disk_snapshot.json
│ │ └── gcp_create_disk_snapshot.py
│ ├── gcp_create_filestore_instance/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_create_filestore_instance.json
│ │ └── gcp_create_filestore_instance.py
│ ├── gcp_create_gke_cluster/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_create_gke_cluster.json
│ │ └── gcp_create_gke_cluster.py
│ ├── gcp_create_service_account/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_create_service_account.json
│ │ └── gcp_create_service_account.py
│ ├── gcp_delete_bucket/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_delete_bucket.json
│ │ └── gcp_delete_bucket.py
│ ├── gcp_delete_filestore_instance/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_delete_filestore_instance.json
│ │ └── gcp_delete_filestore_instance.py
│ ├── gcp_delete_object_from_bucket/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_delete_object_from_bucket.json
│ │ └── gcp_delete_object_from_bucket.py
│ ├── gcp_delete_service_account/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_delete_service_account.json
│ │ └── gcp_delete_service_account.py
│ ├── gcp_describe_gke_cluster/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_describe_gke_cluster.json
│ │ └── gcp_describe_gke_cluster.py
│ ├── gcp_fetch_objects_from_bucket/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_fetch_objects_from_bucket.json
│ │ └── gcp_fetch_objects_from_bucket.py
│ ├── gcp_get_buckets_without_lifecycle_policies/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_get_buckets_without_lifecycle_policies.json
│ │ └── gcp_get_buckets_without_lifecycle_policies.py
│ ├── gcp_get_forwarding_rules_details/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_get_forwarding_rules_details.json
│ │ └── gcp_get_forwarding_rules_details.py
│ ├── gcp_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_get_handle.json
│ │ └── gcp_get_handle.py
│ ├── gcp_get_instances_without_label/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_get_instances_without_label.json
│ │ └── gcp_get_instances_without_label.py
│ ├── gcp_get_unused_backend_services/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_get_unused_backend_services.json
│ │ └── gcp_get_unused_backend_services.py
│ ├── gcp_list_buckets/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_list_buckets.json
│ │ └── gcp_list_buckets.py
│ ├── gcp_list_compute_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_list_compute_instances.json
│ │ └── gcp_list_compute_instances.py
│ ├── gcp_list_compute_instances_by_label/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_list_compute_instances_by_label.json
│ │ └── gcp_list_compute_instances_by_label.py
│ ├── gcp_list_compute_instances_by_vpc/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_list_compute_instances_by_vpc.json
│ │ └── gcp_list_compute_instances_by_vpc.py
│ ├── gcp_list_gke_cluster/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_list_gke_cluster.json
│ │ └── gcp_list_gke_cluster.py
│ ├── gcp_list_nodes_in_gke_cluster/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_list_nodes_in_gke_cluster.json
│ │ └── gcp_list_nodes_in_gke_cluster.py
│ ├── gcp_list_public_buckets/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_list_public_buckets.json
│ │ └── gcp_list_public_buckets.py
│ ├── gcp_list_secrets/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_list_secrets.json
│ │ └── gcp_list_secrets.py
│ ├── gcp_list_service_accounts/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_list_service_accounts.json
│ │ └── gcp_list_service_accounts.py
│ ├── gcp_list_vms_access/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_list_vms_access.json
│ │ └── gcp_list_vms_access.py
│ ├── gcp_remove_member_from_iam_role/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_remove_member_from_iam_role.json
│ │ └── gcp_remove_member_from_iam_role.py
│ ├── gcp_remove_role_from_service_account/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_remove_role_from_service_account.json
│ │ └── gcp_remove_role_from_service_account.py
│ ├── gcp_remove_user_role/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_remove_user_role.json
│ │ └── gcp_remove_user_role.py
│ ├── gcp_resize_gke_cluster/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_resize_gke_cluster.json
│ │ └── gcp_resize_gke_cluster.py
│ ├── gcp_restart_compute_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_restart_compute_instances.json
│ │ └── gcp_restart_compute_instances.py
│ ├── gcp_restore_disk_from_snapshot/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_restore_disk_from_snapshot.json
│ │ └── gcp_restore_disk_from_snapshot.py
│ ├── gcp_save_csv_to_google_sheets_v1/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_save_csv_to_google_sheets_v1.json
│ │ └── gcp_save_csv_to_google_sheets_v1.py
│ ├── gcp_stop_compute_instances/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── gcp_stop_compute_instances.json
│ │ └── gcp_stop_compute_instances.py
│ └── gcp_upload_file_to_bucket/
│ ├── README.md
│ ├── __init__.py
│ ├── gcp_upload_file_to_bucket.json
│ └── gcp_upload_file_to_bucket.py
├── Github/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── github_assign_issue/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_assign_issue.json
│ │ └── github_assign_issue.py
│ ├── github_close_pull_request/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_close_pull_request.json
│ │ └── github_close_pull_request.py
│ ├── github_count_stars/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_count_stars.json
│ │ └── github_count_stars.py
│ ├── github_create_issue/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_create_issue.json
│ │ └── github_create_issue.py
│ ├── github_create_team/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_create_team.json
│ │ └── github_create_team.py
│ ├── github_delete_branch/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_delete_branch.json
│ │ └── github_delete_branch.py
│ ├── github_get_branch/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_get_branch.json
│ │ └── github_get_branch.py
│ ├── github_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_get_handle.json
│ │ └── github_get_handle.py
│ ├── github_get_issue/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_get_issue.json
│ │ └── github_get_issue.py
│ ├── github_get_open_branches/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_get_open_branches.json
│ │ └── github_get_open_branches.py
│ ├── github_get_open_pull_requests/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_get_open_pull_requests.json
│ │ └── github_get_open_pull_requests.py
│ ├── github_get_pull_request/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_get_pull_request.json
│ │ └── github_get_pull_request.py
│ ├── github_get_team/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_get_team.json
│ │ └── github_get_team.py
│ ├── github_get_user/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_get_user.json
│ │ └── github_get_user.py
│ ├── github_invite_user_to_org/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_invite_user_to_org.json
│ │ └── github_invite_user_to_org.py
│ ├── github_issue_comment/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_issue_comment.json
│ │ └── github_issue_comment.py
│ ├── github_list_open_issues/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_list_open_issues.json
│ │ └── github_list_open_issues.py
│ ├── github_list_org_members/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_list_org_members.json
│ │ └── github_list_org_members.py
│ ├── github_list_pull_request_commits/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_list_pull_request_commits.json
│ │ └── github_list_pull_request_commits.py
│ ├── github_list_pull_request_reviewers/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_list_pull_request_reviewers.json
│ │ └── github_list_pull_request_reviewers.py
│ ├── github_list_pull_requests/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_list_pull_requests.json
│ │ └── github_list_pull_requests.py
│ ├── github_list_stale_issues/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_list_stale_issues.json
│ │ └── github_list_stale_issues.py
│ ├── github_list_stale_pull_requests/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_list_stale_pull_requests.json
│ │ └── github_list_stale_pull_requests.py
│ ├── github_list_stargazers/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_list_stargazers.json
│ │ └── github_list_stargazers.py
│ ├── github_list_team_members/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_list_team_members.json
│ │ └── github_list_team_members.py
│ ├── github_list_team_repos/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_list_team_repos.json
│ │ └── github_list_team_repos.py
│ ├── github_list_teams_in_org/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_list_teams_in_org.json
│ │ └── github_list_teams_in_org.py
│ ├── github_list_webhooks/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_list_webhooks.json
│ │ └── github_list_webhooks.py
│ ├── github_merge_pull_request/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── github_merge_pull_request.json
│ │ └── github_merge_pull_request.py
│ └── github_remove_member_from_org/
│ ├── README.md
│ ├── __init__.py
│ ├── github_remove_member_from_org.json
│ └── github_remove_member_from_org.py
├── Grafana/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── grafana_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── grafana_get_handle.json
│ │ └── grafana_get_handle.py
│ └── grafana_list_alerts/
│ ├── README.md
│ ├── __init__.py
│ ├── grafana_list_alerts.json
│ └── grafana_list_alerts.py
├── Hadoop/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── hadoop_get_cluster_apps/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── hadoop_get_cluster_apps.json
│ │ └── hadoop_get_cluster_apps.py
│ ├── hadoop_get_cluster_appstatistics/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── hadoop_get_cluster_appstatistics.json
│ │ └── hadoop_get_cluster_appstatistics.py
│ ├── hadoop_get_cluster_metrics/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── hadoop_get_cluster_metrics.json
│ │ └── hadoop_get_cluster_metrics.py
│ ├── hadoop_get_cluster_nodes/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── hadoop_get_cluster_nodes.json
│ │ └── hadoop_get_cluster_nodes.py
│ └── hadoop_get_handle/
│ ├── README.md
│ ├── __init__.py
│ ├── hadoop_get_handle.json
│ └── hadoop_get_handle.py
├── Jenkins/
│ ├── Fetch_Jenkins_Build_Logs.ipynb
│ ├── Fetch_Jenkins_Build_Logs.json
│ ├── README.md
│ └── legos/
│ ├── __init__.py
│ ├── jenkins_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── jenkins_get_handle.json
│ │ └── jenkins_get_handle.py
│ ├── jenkins_get_logs/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── jenkins_get_logs.json
│ │ └── jenkins_get_logs.py
│ └── jenkins_get_plugins/
│ ├── README.md
│ ├── __init__.py
│ ├── jenkins_get_plugins.json
│ └── jenkins_get_plugins.py
├── Jira/
│ ├── README.md
│ ├── __init__.py
│ ├── jira_visualize_time_to_resolution.ipynb
│ ├── jira_visualize_time_to_resolution.json
│ └── legos/
│ ├── __init__.py
│ ├── jira_add_comment/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── jira_add_comment.json
│ │ └── jira_add_comment.py
│ ├── jira_assign_issue/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── jira_assign_issue.json
│ │ └── jira_assign_issue.py
│ ├── jira_create_issue/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── jira_create_issue.json
│ │ └── jira_create_issue.py
│ ├── jira_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── jira_get_handle.json
│ │ └── jira_get_handle.py
│ ├── jira_get_issue/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── jira_get_issue.json
│ │ └── jira_get_issue.py
│ ├── jira_get_issue_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── jira_get_issue_status.json
│ │ └── jira_get_issue_status.py
│ ├── jira_issue_change_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── jira_issue_change_status.json
│ │ └── jira_issue_change_status.py
│ └── jira_search_issue/
│ ├── README.md
│ ├── __init__.py
│ ├── jira_search_issue.json
│ └── jira_search_issue.py
├── Kafka/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── kafka_broker_health_check/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── kafka_broker_health_check.json
│ │ └── kafka_broker_health_check.py
│ ├── kafka_check_in_sync_replicas/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── kafka_check_in_sync_replicas.json
│ │ └── kafka_check_in_sync_replicas.py
│ ├── kafka_check_lag_change/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── kafka_check_lag_change.json
│ │ └── kafka_check_lag_change.py
│ ├── kafka_check_offline_partitions/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── kafka_check_offline_partitions.json
│ │ └── kafka_check_offline_partitions.py
│ ├── kafka_check_replicas_available/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── kafka_check_replicas_available.json
│ │ └── kafka_check_replicas_available.py
│ ├── kafka_get_committed_messages_count/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── kafka_get_committed_messages_count.json
│ │ └── kafka_get_committed_messages_count.py
│ ├── kafka_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── kafka_get_handle.json
│ │ └── kafka_get_handle.py
│ ├── kafka_get_topic_health/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── kafka_get_topic_health.json
│ │ └── kafka_get_topic_health.py
│ ├── kafka_get_topics_with_lag/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── kafka_get_topics_with_lag.json
│ │ └── kafka_get_topics_with_lag.py
│ ├── kafka_publish_message/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── kafka_publish_message.json
│ │ └── kafka_publish_message.py
│ ├── kafka_run_command/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── kafka_run_command.json
│ │ └── kafka_run_command.py
│ └── kafka_topic_partition_health_check/
│ ├── README.md
│ ├── __init__.py
│ ├── kafka_topic_partition_health_check.json
│ └── kafka_topic_partition_health_check.py
├── Keycloak/
│ ├── __init__.py
│ └── legos/
│ ├── keycloak_get_audit_report/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── keycloak_get_audit_report.json
│ │ └── keycloak_get_audit_report.py
│ ├── keycloak_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── keycloak_get_handle.json
│ │ └── keycloak_get_handle.py
│ └── keycloak_get_service_health/
│ ├── README.md
│ ├── __init__.py
│ ├── keycloak_get_service_health.json
│ └── keycloak_get_service_health.py
├── Kubernetes/
│ ├── Delete_Evicted_Pods_From_Namespaces.ipynb
│ ├── Delete_Evicted_Pods_From_Namespaces.json
│ ├── Get_Kube_System_Config_Map.ipynb
│ ├── Get_Kube_System_Config_Map.json
│ ├── K8S_Delete_Pods_From_Failing_Jobs.ipynb
│ ├── K8S_Delete_Pods_From_Failing_Jobs.json
│ ├── K8S_Deployment_with_multiple_restarts.ipynb
│ ├── K8S_Deployment_with_multiple_restarts.json
│ ├── K8S_Get_Candidate_Nodes_Given_Config.ipynb
│ ├── K8S_Get_Candidate_Nodes_Given_Config.json
│ ├── K8S_Log_Healthcheck.ipynb
│ ├── K8S_Log_Healthcheck.json
│ ├── K8S_Pod_Stuck_In_CrashLoopBack_State.ipynb
│ ├── K8S_Pod_Stuck_In_CrashLoopBack_State.json
│ ├── K8S_Pod_Stuck_In_ImagePullBackOff_State.ipynb
│ ├── K8S_Pod_Stuck_In_ImagePullBackOff_State.json
│ ├── K8S_Pod_Stuck_In_ImagePullBackOff_State_with_genai.ipynb
│ ├── K8S_Pod_Stuck_In_ImagePullBackOff_State_with_genai.json
│ ├── K8S_Pod_Stuck_In_Terminating_State.ipynb
│ ├── K8S_Pod_Stuck_In_Terminating_State.json
│ ├── README.md
│ ├── Resize_List_of_PVCs.ipynb
│ ├── Resize_List_of_PVCs.json
│ ├── Resize_PVC.ipynb
│ ├── Resize_PVC.json
│ ├── Rollback_k8s_Deployment_and_Update_Jira.ipynb
│ ├── Rollback_k8s_Deployment_and_Update_Jira.json
│ ├── __init__.py
│ └── legos/
│ ├── README.md
│ ├── __init__.py
│ ├── k8s_add_node_to_cluster/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_add_node_to_cluster.json
│ │ └── k8s_add_node_to_cluster.py
│ ├── k8s_change_pvc_size/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_change_pvc_size.json
│ │ └── k8s_change_pvc_size.py
│ ├── k8s_check_cronjob_pod_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_check_cronjob_pod_status.json
│ │ └── k8s_check_cronjob_pod_status.py
│ ├── k8s_check_service_pvc_utilization/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_check_service_pvc_utilization.json
│ │ └── k8s_check_service_pvc_utilization.py
│ ├── k8s_check_service_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_check_service_status.json
│ │ └── k8s_check_service_status.py
│ ├── k8s_check_worker_cpu_utilization/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_check_worker_cpu_utilization.json
│ │ └── k8s_check_worker_cpu_utilization.py
│ ├── k8s_delete_pod/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_delete_pod.json
│ │ └── k8s_delete_pod.py
│ ├── k8s_delete_pvc/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_delete_pvc.json
│ │ └── k8s_delete_pvc.py
│ ├── k8s_describe_node/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_describe_node.json
│ │ └── k8s_describe_node.py
│ ├── k8s_describe_pod/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_describe_pod.json
│ │ └── k8s_describe_pod.py
│ ├── k8s_detect_service_crashes/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_detect_service_crashes.json
│ │ └── k8s_detect_service_crashes.py
│ ├── k8s_exec_command_on_pod/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_exec_command_on_pod.json
│ │ └── k8s_exec_command_on_pod.py
│ ├── k8s_exec_command_on_pods_and_filter/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_exec_command_on_pods_and_filter.json
│ │ └── k8s_exec_command_on_pods_and_filter.py
│ ├── k8s_execute_helm_command/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_execute_helm_command.json
│ │ └── k8s_execute_helm_command.py
│ ├── k8s_execute_local_script_on_a_pod/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_execute_local_script_on_a_pod.json
│ │ └── k8s_execute_local_script_on_a_pod.py
│ ├── k8s_gather_data_for_pod_troubleshoot/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_gather_data_for_pod_troubleshoot.json
│ │ └── k8s_gather_data_for_pod_troubleshoot.py
│ ├── k8s_gather_data_for_service_troubleshoot/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_gather_data_for_service_troubleshoot.json
│ │ └── k8s_gather_data_for_service_troubleshoot.py
│ ├── k8s_get_all_evicted_pods_from_namespace/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_all_evicted_pods_from_namespace.json
│ │ └── k8s_get_all_evicted_pods_from_namespace.py
│ ├── k8s_get_all_pods/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_all_pods.json
│ │ └── k8s_get_all_pods.py
│ ├── k8s_get_all_resources_utilization_info/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_all_resources_utilization_info.json
│ │ └── k8s_get_all_resources_utilization_info.py
│ ├── k8s_get_candidate_nodes_for_pods/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_candidate_nodes_for_pods.json
│ │ └── k8s_get_candidate_nodes_for_pods.py
│ ├── k8s_get_cluster_health/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_cluster_health.json
│ │ └── k8s_get_cluster_health.py
│ ├── k8s_get_config_map_kube_system/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_config_map_kube_system.json
│ │ └── k8s_get_config_map_kube_system.py
│ ├── k8s_get_deployment/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_deployment.json
│ │ └── k8s_get_deployment.py
│ ├── k8s_get_deployment_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_deployment_status.json
│ │ └── k8s_get_deployment_status.py
│ ├── k8s_get_error_pods_from_all_jobs/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_error_pods_from_all_jobs.json
│ │ └── k8s_get_error_pods_from_all_jobs.py
│ ├── k8s_get_expiring_cluster_certificate/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_expiring_cluster_certificate.json
│ │ └── k8s_get_expiring_cluster_certificate.py
│ ├── k8s_get_expiring_tls_secret_certificates/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_expiring_tls_secret_certificates.json
│ │ └── k8s_get_expiring_tls_secret_certificates.py
│ ├── k8s_get_failed_deployments/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_failed_deployments.json
│ │ └── k8s_get_failed_deployments.py
│ ├── k8s_get_frequently_restarting_pods/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_frequently_restarting_pods.json
│ │ └── k8s_get_frequently_restarting_pods.py
│ ├── k8s_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_handle.json
│ │ └── k8s_get_handle.py
│ ├── k8s_get_healthy_pods/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_healthy_pods.json
│ │ └── k8s_get_healthy_pods.py
│ ├── k8s_get_memory_utilization_of_services/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_memory_utilization_of_services.json
│ │ └── k8s_get_memory_utilization_of_services.py
│ ├── k8s_get_node_status_and_resource_utilization/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_node_status_and_resource_utilization.json
│ │ └── k8s_get_node_status_and_resource_utilization.py
│ ├── k8s_get_nodes/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_nodes.json
│ │ └── k8s_get_nodes.py
│ ├── k8s_get_nodes_pressure/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_nodes_pressure.json
│ │ └── k8s_get_nodes_pressure.py
│ ├── k8s_get_nodes_with_insufficient_resources/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_nodes_with_insufficient_resources.json
│ │ └── k8s_get_nodes_with_insufficient_resources.py
│ ├── k8s_get_offline_nodes/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_offline_nodes.json
│ │ └── k8s_get_offline_nodes.py
│ ├── k8s_get_oomkilled_pods/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_oomkilled_pods.json
│ │ └── k8s_get_oomkilled_pods.py
│ ├── k8s_get_pending_pods/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_pending_pods.json
│ │ └── k8s_get_pending_pods.py
│ ├── k8s_get_pod_config/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_pod_config.json
│ │ └── k8s_get_pod_config.py
│ ├── k8s_get_pod_logs/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_pod_logs.json
│ │ └── k8s_get_pod_logs.py
│ ├── k8s_get_pod_logs_and_filter/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_pod_logs_and_filter.json
│ │ └── k8s_get_pod_logs_and_filter.py
│ ├── k8s_get_pod_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_pod_status.json
│ │ └── k8s_get_pod_status.py
│ ├── k8s_get_pods_attached_to_pvc/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_pods_attached_to_pvc.json
│ │ └── k8s_get_pods_attached_to_pvc.py
│ ├── k8s_get_pods_in_crashloopbackoff_state/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_pods_in_crashloopbackoff_state.json
│ │ └── k8s_get_pods_in_crashloopbackoff_state.py
│ ├── k8s_get_pods_in_imagepullbackoff_state/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_pods_in_imagepullbackoff_state.json
│ │ └── k8s_get_pods_in_imagepullbackoff_state.py
│ ├── k8s_get_pods_in_not_running_state/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_pods_in_not_running_state.json
│ │ └── k8s_get_pods_in_not_running_state.py
│ ├── k8s_get_pods_in_terminating_state/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_pods_in_terminating_state.json
│ │ └── k8s_get_pods_in_terminating_state.py
│ ├── k8s_get_pods_with_high_restart/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_pods_with_high_restart.json
│ │ └── k8s_get_pods_with_high_restart.py
│ ├── k8s_get_service_images/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_service_images.json
│ │ └── k8s_get_service_images.py
│ ├── k8s_get_service_with_no_associated_endpoints/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_service_with_no_associated_endpoints.json
│ │ └── k8s_get_service_with_no_associated_endpoints.py
│ ├── k8s_get_services/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_services.json
│ │ └── k8s_get_services.py
│ ├── k8s_get_unbound_pvcs/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_unbound_pvcs.json
│ │ └── k8s_get_unbound_pvcs.py
│ ├── k8s_get_versioning_info/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_get_versioning_info.json
│ │ └── k8s_get_versioning_info.py
│ ├── k8s_kubectl_command/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_command.json
│ │ └── k8s_kubectl_command.py
│ ├── k8s_kubectl_config_set_context/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_config_set_context.json
│ │ └── k8s_kubectl_config_set_context.py
│ ├── k8s_kubectl_config_view/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_config_view.json
│ │ └── k8s_kubectl_config_view.py
│ ├── k8s_kubectl_delete_pod/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_delete_pod.json
│ │ └── k8s_kubectl_delete_pod.py
│ ├── k8s_kubectl_describe_node/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_describe_node.json
│ │ └── k8s_kubectl_describe_node.py
│ ├── k8s_kubectl_describe_pod/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_describe_pod.json
│ │ └── k8s_kubectl_describe_pod.py
│ ├── k8s_kubectl_drain_node/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_drain_node.json
│ │ └── k8s_kubectl_drain_node.py
│ ├── k8s_kubectl_exec_command/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_exec_command.json
│ │ └── k8s_kubectl_exec_command.py
│ ├── k8s_kubectl_get_api_resources/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_get_api_resources.json
│ │ └── k8s_kubectl_get_api_resources.py
│ ├── k8s_kubectl_get_logs/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_get_logs.json
│ │ └── k8s_kubectl_get_logs.py
│ ├── k8s_kubectl_get_service_namespace/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_get_service_namespace.json
│ │ └── k8s_kubectl_get_service_namespace.py
│ ├── k8s_kubectl_list_pods/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_list_pods.json
│ │ └── k8s_kubectl_list_pods.py
│ ├── k8s_kubectl_patch_pod/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_patch_pod.json
│ │ └── k8s_kubectl_patch_pod.py
│ ├── k8s_kubectl_rollout_deployment/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_rollout_deployment.json
│ │ └── k8s_kubectl_rollout_deployment.py
│ ├── k8s_kubectl_scale_deployment/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_scale_deployment.json
│ │ └── k8s_kubectl_scale_deployment.py
│ ├── k8s_kubectl_show_metrics_node/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_show_metrics_node.json
│ │ └── k8s_kubectl_show_metrics_node.py
│ ├── k8s_kubectl_show_metrics_pod/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_kubectl_show_metrics_pod.json
│ │ └── k8s_kubectl_show_metrics_pod.py
│ ├── k8s_list_all_matching_pods/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_list_all_matching_pods.json
│ │ └── k8s_list_all_matching_pods.py
│ ├── k8s_list_pvcs/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_list_pvcs.json
│ │ └── k8s_list_pvcs.py
│ ├── k8s_measure_worker_node_network_bandwidth/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_measure_worker_node_network_bandwidth.json
│ │ └── k8s_measure_worker_node_network_bandwidth.py
│ ├── k8s_remove_pod_from_deployment/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── k8s_remove_pod_from_deployment.json
│ │ └── k8s_remove_pod_from_deployment.py
│ └── k8s_update_command_in_pod_spec/
│ ├── README.md
│ ├── __init__.py
│ ├── k8s_update_command_in_pod_spec.json
│ └── k8s_update_command_in_pod_spec.py
├── License
├── Mantishub/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ └── mantishub_get_handle/
│ ├── README.md
│ ├── mantishub_get_handle.json
│ └── mantishub_get_handle.py
├── Mongo/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── mongodb_add_new_field_in_collections/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_add_new_field_in_collections.json
│ │ └── mongodb_add_new_field_in_collections.py
│ ├── mongodb_aggregate_command/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_aggregate_command.json
│ │ └── mongodb_aggregate_command.py
│ ├── mongodb_atlas_cluster_backup/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_atlas_cluster_backup.json
│ │ └── mongodb_atlas_cluster_backup.py
│ ├── mongodb_check_large_index_size/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_check_large_index_size.json
│ │ └── mongodb_check_large_index_size.py
│ ├── mongodb_compare_disk_size_to_threshold/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_compare_disk_size_to_threshold.json
│ │ └── mongodb_compare_disk_size_to_threshold.py
│ ├── mongodb_count_documents/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_count_documents.json
│ │ └── mongodb_count_documents.py
│ ├── mongodb_create_collection/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_create_collection.json
│ │ └── mongodb_create_collection.py
│ ├── mongodb_create_database/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_create_database.json
│ │ └── mongodb_create_database.py
│ ├── mongodb_delete_collection/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_delete_collection.json
│ │ └── mongodb_delete_collection.py
│ ├── mongodb_delete_database/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_delete_database.json
│ │ └── mongodb_delete_database.py
│ ├── mongodb_delete_document/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_delete_document.json
│ │ └── mongodb_delete_document.py
│ ├── mongodb_distinct_command/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_distinct_command.json
│ │ └── mongodb_distinct_command.py
│ ├── mongodb_find_document/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_find_document.json
│ │ └── mongodb_find_document.py
│ ├── mongodb_find_one/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_find_one.json
│ │ └── mongodb_find_one.py
│ ├── mongodb_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_get_handle.json
│ │ └── mongodb_get_handle.py
│ ├── mongodb_get_metrics/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_get_metrics.json
│ │ └── mongodb_get_metrics.py
│ ├── mongodb_get_replica_set/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_get_replica_set.json
│ │ └── mongodb_get_replica_set.py
│ ├── mongodb_get_server_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_get_server_status.json
│ │ └── mongodb_get_server_status.py
│ ├── mongodb_get_write_conflicts/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_get_write_conflicts.json
│ │ └── mongodb_get_write_conflicts.py
│ ├── mongodb_insert_document/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_insert_document.json
│ │ └── mongodb_insert_document.py
│ ├── mongodb_kill_queries/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_kill_queries.json
│ │ └── mongodb_kill_queries.py
│ ├── mongodb_list_collections/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_list_collections.json
│ │ └── mongodb_list_collections.py
│ ├── mongodb_list_databases/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_list_databases.json
│ │ └── mongodb_list_databases.py
│ ├── mongodb_list_queries/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_list_queries.json
│ │ └── mongodb_list_queries.py
│ ├── mongodb_read_query/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_read_query.json
│ │ └── mongodb_read_query.py
│ ├── mongodb_remove_field_in_collections/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_remove_field_in_collections.json
│ │ └── mongodb_remove_field_in_collections.py
│ ├── mongodb_rename_database/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_rename_database.json
│ │ └── mongodb_rename_database.py
│ ├── mongodb_update_document/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mongodb_update_document.json
│ │ └── mongodb_update_document.py
│ └── mongodb_write_query/
│ ├── README.md
│ ├── __init__.py
│ ├── mongodb_write_query.json
│ └── mongodb_write_query.py
├── MsSQL/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── mssql_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mssql_get_handle.json
│ │ └── mssql_get_handle.py
│ ├── mssql_read_query/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mssql_read_query.json
│ │ └── mssql_read_query.py
│ └── mssql_write_query/
│ ├── README.md
│ ├── __init__.py
│ ├── mssql_write_query.json
│ └── mssql_write_query.py
├── MySQL/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── mysql_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mysql_get_handle.json
│ │ └── mysql_get_handle.py
│ ├── mysql_get_long_run_queries/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mysql_get_long_run_queries.json
│ │ └── mysql_get_long_run_queries.py
│ ├── mysql_kill_query/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mysql_kill_query.json
│ │ └── mysql_kill_query.py
│ ├── mysql_read_query/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── mysql_read_query.json
│ │ └── mysql_read_query.py
│ └── mysql_write_query/
│ ├── README.md
│ ├── __init__.py
│ ├── mysql_write_query.json
│ └── mysql_write_query.py
├── Netbox/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── netbox_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── netbox_get_handle.json
│ │ └── netbox_get_handle.py
│ └── netbox_list_devices/
│ ├── README.md
│ ├── __init__.py
│ ├── netbox_list_devices.json
│ └── netbox_list_devices.py
├── Nomad/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── nomad_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── nomad_get_handle.json
│ │ └── nomad_get_handle.py
│ └── nomad_list_jobs/
│ ├── README.md
│ ├── __init__.py
│ ├── nomad_list_jobs.json
│ └── nomad_list_jobs.py
├── Opsgenie/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ └── opsgenie_get_handle/
│ ├── README.md
│ ├── __init__.py
│ ├── opsgenie_get_handle.json
│ └── opsgenie_get_handle.py
├── Pingdom/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── pingdom_create_new_maintenance_window/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── pingdom_create_new_maintenance_window.json
│ │ └── pingdom_create_new_maintenance_window.py
│ ├── pingdom_do_single_check/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── pingdom_do_single_check.json
│ │ └── pingdom_do_single_check.py
│ ├── pingdom_get_analysis/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── pingdom_get_analysis.json
│ │ └── pingdom_get_analysis.py
│ ├── pingdom_get_checkids/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── pingdom_get_checkids.json
│ │ └── pingdom_get_checkids.py
│ ├── pingdom_get_checkids_by_name/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── pingdom_get_checkids_by_name.json
│ │ └── pingdom_get_checkids_by_name.py
│ ├── pingdom_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── pingdom_get_handle.json
│ │ └── pingdom_get_handle.py
│ ├── pingdom_get_maintenance/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── pingdom_get_maintenance.json
│ │ └── pingdom_get_maintenance.py
│ ├── pingdom_get_results/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── pingdom_get_results.json
│ │ └── pingdom_get_results.py
│ ├── pingdom_get_tmscheck/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── pingdom_get_tmscheck.json
│ │ └── pingdom_get_tmscheck.py
│ ├── pingdom_pause_or_unpause_checkids/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── pingdom_pause_or_unpause_checkids.json
│ │ └── pingdom_pause_or_unpause_checkids.py
│ └── pingdom_traceroute/
│ ├── README.md
│ ├── __init__.py
│ ├── pingdom_traceroute.json
│ └── pingdom_traceroute.py
├── Postgresql/
│ ├── Postgresql_Display_Long_Running.ipynb
│ ├── Postgresql_Display_Long_Running.json
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── postgres_calculate_bloat/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgres_calculate_bloat.json
│ │ └── postgres_calculate_bloat.py
│ ├── postgresql_call_function/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_call_function.json
│ │ └── postgresql_call_function.py
│ ├── postgresql_check_active_connections/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_check_active_connections.json
│ │ └── postgresql_check_active_connections.py
│ ├── postgresql_check_locks/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_check_locks.json
│ │ └── postgresql_check_locks.py
│ ├── postgresql_check_unused_indexes/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_check_unused_indexes.json
│ │ └── postgresql_check_unused_indexes.py
│ ├── postgresql_create_table/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_create_table.json
│ │ └── postgresql_create_table.py
│ ├── postgresql_delete_query/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_delete_query.json
│ │ └── postgresql_delete_query.py
│ ├── postgresql_get_cache_hit_ratio/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_get_cache_hit_ratio.json
│ │ └── postgresql_get_cache_hit_ratio.py
│ ├── postgresql_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_get_handle.json
│ │ └── postgresql_get_handle.py
│ ├── postgresql_get_index_usage/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_get_index_usage.json
│ │ └── postgresql_get_index_usage.py
│ ├── postgresql_get_server_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_get_server_status.json
│ │ └── postgresql_get_server_status.py
│ ├── postgresql_handling_transaction/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_handling_transaction.json
│ │ └── postgresql_handling_transaction.py
│ ├── postgresql_long_running_queries/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_long_running_queries.json
│ │ └── postgresql_long_running_queries.py
│ ├── postgresql_read_query/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_read_query.json
│ │ └── postgresql_read_query.py
│ ├── postgresql_show_tables/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_show_tables.json
│ │ └── postgresql_show_tables.py
│ ├── postgresql_stored_procedures/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── postgresql_stored_procedures.json
│ │ └── postgresql_stored_procedures.py
│ └── postgresql_write_query/
│ ├── README.md
│ ├── __init__.py
│ ├── postgresql_write_query.json
│ └── postgresql_write_query.py
├── Prometheus/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── prometheus_alerts_list/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── prometheus_alerts_list.json
│ │ └── prometheus_alerts_list.py
│ ├── prometheus_get_all_metrics/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── prometheus_get_all_metrics.json
│ │ └── prometheus_get_all_metrics.py
│ ├── prometheus_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── prometheus_get_handle.json
│ │ └── prometheus_get_handle.py
│ └── prometheus_get_metric_statistics/
│ ├── README.md
│ ├── prometheus_get_metric_statistics.json
│ └── prometheus_get_metric_statistics.py
├── README.md
├── README_extending_docker.md
├── Redis/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── redis_delete_all_keys/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── redis_delete_all_keys.json
│ │ └── redis_delete_all_keys.py
│ ├── redis_delete_keys/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── redis_delete_keys.json
│ │ └── redis_delete_keys.py
│ ├── redis_delete_stale_keys/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── redis_delete_stale_keys.json
│ │ └── redis_delete_stale_keys.py
│ ├── redis_get_cluster_health/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── redis_get_cluster_health.json
│ │ └── redis_get_cluster_health.py
│ ├── redis_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── redis_get_handle.json
│ │ └── redis_get_handle.py
│ ├── redis_get_keys_count/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── redis_get_keys_count.json
│ │ └── redis_get_keys_count.py
│ ├── redis_get_metrics/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── redis_get_metrics.json
│ │ └── redis_get_metrics.py
│ └── redis_list_large_keys/
│ ├── README.md
│ ├── __init__.py
│ ├── redis_list_large_keys.json
│ └── redis_list_large_keys.py
├── Rest/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── rest_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── rest_get_handle.json
│ │ └── rest_get_handle.py
│ └── rest_methods/
│ ├── README.md
│ ├── __init__.py
│ ├── rest_methods.json
│ └── rest_methods.py
├── SSH/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── ssh_execute_remote_command/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── ssh_execute_remote_command.json
│ │ └── ssh_execute_remote_command.py
│ ├── ssh_find_large_files/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── ssh_find_large_files.json
│ │ └── ssh_find_large_files.py
│ ├── ssh_get_ec2_instances_with_low_available_disk_size/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── ssh_get_ec2_instances_with_low_available_disk_size.json
│ │ └── ssh_get_ec2_instances_with_low_available_disk_size.py
│ ├── ssh_get_ec2_instances_with_low_memory_size/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── ssh_get_ec2_instances_with_low_memory_size.json
│ │ └── ssh_get_ec2_instances_with_low_memory_size.py
│ ├── ssh_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── ssh_get_handle.json
│ │ └── ssh_get_handle.py
│ ├── ssh_get_hosts_with_low_disk_latency/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── ssh_get_hosts_with_low_disk_latency.json
│ │ └── ssh_get_hosts_with_low_disk_latency.py
│ ├── ssh_restart_service_using_sysctl/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── ssh_restart_service_using_sysctl.json
│ │ └── ssh_restart_service_using_sysctl.py
│ └── ssh_scp/
│ ├── README.md
│ ├── __init__.py
│ ├── ssh_scp.json
│ └── ssh_scp.py
├── SalesForce/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── salesforce_assign_case/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── salesforce_assign_case.json
│ │ └── salesforce_assign_case.py
│ ├── salesforce_case_change_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── salesforce_case_change_status.json
│ │ └── salesforce_case_change_status.py
│ ├── salesforce_create_case/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── salesforce_create_case.json
│ │ └── salesforce_create_case.py
│ ├── salesforce_delete_case/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── salesforce_delete_case.json
│ │ └── salesforce_delete_case.py
│ ├── salesforce_get_case/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── salesforce_get_case.json
│ │ └── salesforce_get_case.py
│ ├── salesforce_get_case_status/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── salesforce_get_case_status.json
│ │ └── salesforce_get_case_status.py
│ ├── salesforce_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── salesforce_get_handle.json
│ │ └── salesforce_get_handle.py
│ ├── salesforce_search_case/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── salesforce_search_case.json
│ │ └── salesforce_search_case.py
│ └── salesforce_update_case/
│ ├── README.md
│ ├── __init__.py
│ ├── salesforce_update_case.json
│ └── salesforce_update_case.py
├── Slack/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── slack_create_channel_invite_users/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── slack_create_channel_invite_users.json
│ │ └── slack_create_channel_invite_users.py
│ ├── slack_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── slack_get_handle.json
│ │ └── slack_get_handle.py
│ ├── slack_lookup_user_by_email/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── slack_lookup_user_by_email.json
│ │ └── slack_lookup_user_by_email.py
│ ├── slack_post_image/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── slack_post_image.json
│ │ └── slack_post_image.py
│ ├── slack_post_message/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── slack_post_message.json
│ │ └── slack_post_message.py
│ └── slack_send_DM/
│ ├── README.md
│ ├── __init__.py
│ ├── slack_send_DM.json
│ └── slack_send_DM.py
├── Snowflake/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── snowflake_read_query/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── snowflake_read_query.json
│ │ └── snowflake_read_query.py
│ └── snowflake_write_query/
│ ├── README.md
│ ├── __init__.py
│ ├── snowflake_write_query.json
│ └── snowflake_write_query.py
├── Splunk/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ └── splunk_get_handle/
│ ├── README.md
│ ├── __init__.py
│ ├── splunk_get_handle.json
│ └── splunk_get_handle.py
├── Stripe/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── stripe_capture_charge/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_capture_charge.json
│ │ └── stripe_capture_charge.py
│ ├── stripe_close_dispute/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_close_dispute.json
│ │ └── stripe_close_dispute.py
│ ├── stripe_create_charge/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_create_charge.json
│ │ └── stripe_create_charge.py
│ ├── stripe_create_customer/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_create_customer.json
│ │ └── stripe_create_customer.py
│ ├── stripe_create_refund/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_create_refund.json
│ │ └── stripe_create_refund.py
│ ├── stripe_delete_customer/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_delete_customer.json
│ │ └── stripe_delete_customer.py
│ ├── stripe_get_all_charges/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_get_all_charges.json
│ │ └── stripe_get_all_charges.py
│ ├── stripe_get_all_customers/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_get_all_customers.json
│ │ └── stripe_get_all_customers.py
│ ├── stripe_get_all_disputes/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_get_all_disputes.json
│ │ └── stripe_get_all_disputes.py
│ ├── stripe_get_all_refunds/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_get_all_refunds.json
│ │ └── stripe_get_all_refunds.py
│ ├── stripe_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_get_handle.json
│ │ └── stripe_get_handle.py
│ ├── stripe_retrieve_charge/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_retrieve_charge.json
│ │ └── stripe_retrieve_charge.py
│ ├── stripe_retrieve_customer/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_retrieve_customer.json
│ │ └── stripe_retrieve_customer.py
│ ├── stripe_retrieve_dispute/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_retrieve_dispute.json
│ │ └── stripe_retrieve_dispute.py
│ ├── stripe_retrieve_refund/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_retrieve_refund.json
│ │ └── stripe_retrieve_refund.py
│ ├── stripe_update_charge/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_update_charge.json
│ │ └── stripe_update_charge.py
│ ├── stripe_update_customer/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_update_customer.json
│ │ └── stripe_update_customer.py
│ ├── stripe_update_dispute/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── stripe_update_dispute.json
│ │ └── stripe_update_dispute.py
│ └── stripe_update_refund/
│ ├── README.md
│ ├── __init__.py
│ ├── stripe_update_refund.json
│ └── stripe_update_refund.py
├── Terraform/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── terraform_exec_command/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── terraform_exec_command.json
│ │ └── terraform_exec_command.py
│ └── terraform_get_handle/
│ ├── README.md
│ ├── __init__.py
│ ├── terraform_get_handle.json
│ └── terraform_get_handle.py
├── Vault/
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── vault_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── vault_get_handle.json
│ │ └── vault_get_handle.py
│ └── vault_get_service_health/
│ ├── README.md
│ ├── __init__.py
│ ├── vault_get_service_health.json
│ └── vault_get_service_health.py
├── Zabbix/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ └── zabbix_get_handle/
│ ├── README.md
│ ├── __init__.py
│ ├── zabbix_get_handle.json
│ └── zabbix_get_handle.py
├── _config.yml
├── all_modules_test.py
├── bin/
│ ├── add_creds.sh
│ └── unskript-add-check.sh
├── build/
│ └── templates/
│ ├── Dockerfile.template
│ ├── GetStarted.ipynb
│ ├── Makefile.extend-docker.template
│ ├── Welcome.ipynb
│ └── Welcome_template.ipynb
├── generate_readme.ipynb
├── generate_readme.nbconvert.ipynb
├── helm/
│ ├── .helmignore
│ ├── full/
│ │ ├── Chart.yaml
│ │ ├── README.md
│ │ ├── templates/
│ │ │ ├── NOTES.txt
│ │ │ ├── _helpers.tpl
│ │ │ ├── deployment.yaml
│ │ │ ├── service.yaml
│ │ │ ├── serviceaccount.yaml
│ │ │ └── statefulset.yaml
│ │ └── values.yaml
│ └── minimal/
│ ├── Chart.yaml
│ ├── README.md
│ ├── templates/
│ │ ├── NOTES.txt
│ │ ├── _helpers.tpl
│ │ ├── deployment.yaml
│ │ ├── service.yaml
│ │ ├── serviceaccount.yaml
│ │ └── statefulset.yaml
│ └── values.yaml
├── infra/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── infra_execute_runbook/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── infra_execute_runbook.json
│ │ └── infra_execute_runbook.py
│ ├── infra_workflow_done/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── infra_workflow_done.json
│ │ └── infra_workflow_done.py
│ ├── workflow_ss_append_keys/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── workflow_ss_append_keys.json
│ │ └── workflow_ss_append_keys.py
│ ├── workflow_ss_create_keys/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── workflow_ss_create_keys.json
│ │ └── workflow_ss_create_keys.py
│ ├── workflow_ss_delete_keys/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── workflow_ss_delete_keys.json
│ │ └── workflow_ss_delete_keys.py
│ ├── workflow_ss_get_keys/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── workflow_ss_get_keys.json
│ │ └── workflow_ss_get_keys.py
│ ├── workflow_ss_rename_keys/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── workflow_ss_rename_keys.json
│ │ └── workflow_ss_rename_keys.py
│ └── workflow_ss_update_keys/
│ ├── README.md
│ ├── __init__.py
│ ├── workflow_ss_update_keys.json
│ └── workflow_ss_update_keys.py
├── lists/
│ ├── Action_list.md
│ ├── action_AIRFLOW.md
│ ├── action_AWS.md
│ ├── action_AWS_ACM.md
│ ├── action_AWS_ASG.md
│ ├── action_AWS_CLI.md
│ ├── action_AWS_CLOUDTRAIL.md
│ ├── action_AWS_CLOUDWATCH.md
│ ├── action_AWS_COST_EXPLORER.md
│ ├── action_AWS_DYNAMODB.md
│ ├── action_AWS_EBC.md
│ ├── action_AWS_EBS.md
│ ├── action_AWS_EC2.md
│ ├── action_AWS_ECS.md
│ ├── action_AWS_EKS.md
│ ├── action_AWS_ELASTICACHE.md
│ ├── action_AWS_ELASTICCACHE.md
│ ├── action_AWS_ELB.md
│ ├── action_AWS_EMR.md
│ ├── action_AWS_IAM.md
│ ├── action_AWS_LAMBDA.md
│ ├── action_AWS_LOGS.md
│ ├── action_AWS_NAT_GATEWAY.md
│ ├── action_AWS_POSTGRES.md
│ ├── action_AWS_RDS.md
│ ├── action_AWS_REDSHIFT.md
│ ├── action_AWS_ROUTE53.md
│ ├── action_AWS_S3.md
│ ├── action_AWS_SECRET_MANAGER.md
│ ├── action_AWS_SQS.md
│ ├── action_AWS_SSM.md
│ ├── action_AWS_STS.md
│ ├── action_AWS_VPC.md
│ ├── action_AZURE.md
│ ├── action_CHATGPT.md
│ ├── action_CLOUDOPS.md
│ ├── action_COST_OPT,CATEGORY_TYPE_SRE.md
│ ├── action_COST_OPT.md
│ ├── action_DATADOG.md
│ ├── action_DATADOG_ALERTS.md
│ ├── action_DATADOG_EVENT.md
│ ├── action_DATADOG_INCIDENT.md
│ ├── action_DATADOG_METRICS.md
│ ├── action_DATADOG_MONITOR.md
│ ├── action_DB.md
│ ├── action_DEVOPS.md
│ ├── action_EBS.md
│ ├── action_ECS.md
│ ├── action_ES.md
│ ├── action_GCP.md
│ ├── action_GCP_BUCKET.md
│ ├── action_GCP_FILE_STORE.md
│ ├── action_GCP_GKE.md
│ ├── action_GCP_IAM.md
│ ├── action_GCP_SECRET.md
│ ├── action_GCP_SHEETS.md
│ ├── action_GCP_STORAGE.md
│ ├── action_GCP_VM.md
│ ├── action_GCP_VMS.md
│ ├── action_GCP_VPC.md
│ ├── action_GITHUB.md
│ ├── action_GITHUB_ISSUE.md
│ ├── action_GITHUB_ORG.md
│ ├── action_GITHUB_PR.md
│ ├── action_GITHUB_REPO.md
│ ├── action_GITHUB_TEAM.md
│ ├── action_GITHUB_USER.md
│ ├── action_GRAFANA.md
│ ├── action_HADOOP.md
│ ├── action_IAM.md
│ ├── action_INFRA.md
│ ├── action_JENKINS.md
│ ├── action_JIRA.md
│ ├── action_K8S.md
│ ├── action_K8S_CLUSTER.md
│ ├── action_K8S_KUBECTL.md
│ ├── action_K8S_NAMESPACE.md
│ ├── action_K8S_NODE.md
│ ├── action_K8S_POD.md
│ ├── action_K8S_PVC.md
│ ├── action_KAFKA.md
│ ├── action_MANTISHUB.md
│ ├── action_MONGODB.md
│ ├── action_MONGODB_CLUSTER.md
│ ├── action_MONGODB_COLLECTION.md
│ ├── action_MONGODB_DOCUMENT.md
│ ├── action_MONGODB_QUERY.md
│ ├── action_MSSQL.md
│ ├── action_MSSQL_QUERY.md
│ ├── action_MYSQL.md
│ ├── action_MYSQL_QUERY.md
│ ├── action_NETBOX.md
│ ├── action_NOMAD.md
│ ├── action_OPENSEARCH.md
│ ├── action_PINGDOM.md
│ ├── action_POSTGRESQL.md
│ ├── action_POSTGRESQL_QUERY.md
│ ├── action_POSTGRESQL_TABLE.md
│ ├── action_PROMETHEUS.md
│ ├── action_REDIS.md
│ ├── action_REST.md
│ ├── action_SALESFORCE.md
│ ├── action_SECOPS.md
│ ├── action_SLACK.md
│ ├── action_SNOWFLAKE.md
│ ├── action_SPLUNK.md
│ ├── action_SRE.md
│ ├── action_SSH.md
│ ├── action_STRIPE.md
│ ├── action_STRIPE_CHARGE.md
│ ├── action_STRIPE_DISPUTE.md
│ ├── action_STRIPE_REFUND.md
│ ├── action_TERRAFORM.md
│ ├── action_TROUBLESHOOTING.md
│ ├── action_ZABBIX.md
│ ├── runbook_CLOUDOPS.md
│ ├── runbook_COST_OPT.md
│ ├── runbook_DEVOPS.md
│ ├── runbook_IAM.md
│ ├── runbook_SECOPS.md
│ ├── runbook_SRE.md
│ ├── runbook_TROUBLESHOOTING.md
│ └── xRunBook_list.md
├── opensearch/
│ ├── README.md
│ ├── __init__.py
│ └── legos/
│ ├── __init__.py
│ ├── opensearch_get_handle/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── opensearch_get_handle.json
│ │ └── opensearch_get_handle.py
│ └── opensearch_search/
│ ├── README.md
│ ├── __init__.py
│ ├── opensearch_search.json
│ └── opensearch_search.py
├── region_test.py
├── sanitize.py
├── suites/
│ ├── AWS_RDS.json
│ ├── Cost_optimization.json
│ ├── K8s_connectivity.json
│ ├── K8s_pods.json
│ ├── K8s_runtime.json
│ ├── aws_ec2.json
│ ├── aws_lambbdas.json
│ └── aws_loadbalancer.json
├── templates/
│ ├── README.md
│ ├── legos/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── lego.json
│ │ └── lego.py
│ ├── runbooks/
│ │ ├── StartHere.ipynb
│ │ ├── gcp.ipynb
│ │ └── k8s.ipynb
│ └── scheduler.template
├── tools/
│ ├── README.md
│ └── runbook-sa/
│ ├── runbook_sa.py
│ └── runbook_sa.sh
├── unskript-ctl/
│ ├── DESIGN.md
│ ├── README.md
│ ├── add_creds.py
│ ├── bash_completion_unskript_ctl.bash
│ ├── config/
│ │ └── unskript_ctl_config.yaml
│ ├── config_parser_test_matrix.md
│ ├── creds_ui.py
│ ├── diagnostics.py
│ ├── diagnostics_worker.py
│ ├── docs/
│ │ └── design.puml
│ ├── stub_creds.json
│ ├── templates/
│ │ ├── check.py.template
│ │ ├── check_test.py.template
│ │ ├── first_cell_content.j2
│ │ ├── last_cell_content.j2
│ │ ├── template_info_lego.j2
│ │ ├── template_script.j2
│ │ └── timeout_handler.j2
│ ├── tests/
│ │ ├── test_database.py
│ │ ├── test_errors.log
│ │ ├── test_notification.py
│ │ └── test_unskript_factory.py
│ ├── unskript-add-check.py
│ ├── unskript-ctl.sh
│ ├── unskript_audit_cleanup.py
│ ├── unskript_ctl_config_parser.py
│ ├── unskript_ctl_custom_notification.py
│ ├── unskript_ctl_database.py
│ ├── unskript_ctl_factory.py
│ ├── unskript_ctl_main.py
│ ├── unskript_ctl_notification.py
│ ├── unskript_ctl_run.py
│ ├── unskript_ctl_upload_session_logs.py
│ ├── unskript_ctl_version.py
│ ├── unskript_db_schema.json
│ ├── unskript_email_notify_check_schema.json
│ ├── unskript_slack_notify_schema.json
│ ├── unskript_upload_results_to_s3.py
│ └── unskript_utils.py
├── validator.py
└── xrunbooks-directory.md
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/Anatomy-of-Runbook.md
================================================
[
](https://unskript.com/assets/favicon.png)
Runbook Definition
In a computer system or network, a Runbook is a compilation of routine procedures and operations that the system administrator or operator carries out. System administrators in IT departments and NOCs use Runbooks as a reference.
- Wikipedia (https://en.wikipedia.org/wiki/Runbook)
CloudOps automation made simple!
Visit our blog
·
YouTube Tutorials
.
Report Bug
·
Request Feature
1) List all Expiring Access Key
2) Create AWS Access Key
3) Update AWS Access Key
4) Delete AWS Access Key
Using unSkript's AWS List Expiring Access Keys action we will list those users whose Access Keys are past the given threshold number of days, i.e. expiring.
\n", "\n", "\n", "Action takes the following parameters:
\n", "threshold_days
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "dae47429-ca5a-4834-bb46-ac9b2a37527f", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_IAM" ], "actionDescription": "List Expiring IAM User Access Keys", "actionEntryFunction": "aws_list_expiring_access_keys", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "924025582b6c1b3ea3c8c834f1ee430a2df8bd42c5119191cb5c5da3121f1d18" ], "actionNextHopParameterMapping": {}, "actionNouns": [ "expiring", "access", "aws" ], "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS List Expiring Access Keys", "actionType": "LEGO_TYPE_AWS", "actionVerbs": [ "list" ], "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "c3a4e091801f8197429f073a0612e2cc373b6630ce4426d73617b8e101bc5d6a", "collapsed": true, "continueOnError": false, "credentialsJson": {}, "description": "List Expiring IAM User Access Keys", "id": 1, "index": 1, "inputData": [ { "threshold_days": { "constant": false, "value": "int(threshold_days)" } } ], "inputschema": [ { "properties": { "threshold_days": { "default": 90, "description": "Threshold number(in days) to check for expiry. 
Eg: 30", "title": "Threshold Days", "type": "integer" } }, "required": [], "title": "aws_list_expiring_access_keys", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS List Expiring Access Keys", "orderProperties": [ "threshold_days" ], "outputParams": { "output_name": "expiring_users", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "tags": [ "aws_list_expiring_access_keys" ], "uuid": "c3a4e091801f8197429f073a0612e2cc373b6630ce4426d73617b8e101bc5d6a", "version": "1.0.0" }, "outputs": [], "source": [ "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import pprint\n", "from typing import Tuple\n", "import datetime\n", "import dateutil\n", "from pydantic import BaseModel, Field\n", "from unskript.legos.aws.aws_list_all_iam_users.aws_list_all_iam_users import aws_list_all_iam_users\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_list_expiring_access_keys_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def aws_list_expiring_access_keys(handle, threshold_days: int = 90)-> Tuple:\n", " \"\"\"aws_list_expiring_access_keys returns all the ACM issued certificates which are\n", " about to expire given a threshold number of days\n", "\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :type threshold_days: int\n", " :param threshold_days: Threshold number of days to check for expiry. 
Eg: 30 -lists\n", " all access Keys which are expiring within 30 days\n", "\n", " :rtype: Status, List of expiring access keys and Error if any\n", " \"\"\"\n", " result =[]\n", " all_users=[]\n", " try:\n", " all_users = aws_list_all_iam_users(handle=handle)\n", " except Exception as error:\n", " raise error\n", "\n", " for each_user in all_users:\n", " try:\n", " iamClient = handle.client('iam')\n", " final_result={}\n", " response = iamClient.list_access_keys(UserName=each_user)\n", " for x in response[\"AccessKeyMetadata\"]:\n", " if len(response[\"AccessKeyMetadata\"])!= 0:\n", " create_date = x[\"CreateDate\"]\n", " right_now = datetime.datetime.now(dateutil.tz.tzlocal())\n", " diff = right_now-create_date\n", " days_remaining = diff.days\n", " if days_remaining > threshold_days:\n", " final_result[\"username\"] = x[\"UserName\"]\n", " final_result[\"access_key_id\"] = x[\"AccessKeyId\"]\n", " if len(final_result)!=0:\n", " result.append(final_result)\n", " except Exception as e:\n", " raise e\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"threshold_days\": \"int(threshold_days)\"\n", " }''')\n", "task.configure(outputName=\"expiring_users\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_list_expiring_access_keys, lego_printer=aws_list_expiring_access_keys_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "af12fb80-4786-4dc6-b1b9-c7fdc372563e", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 1A", "orderProperties": [], "tags": [], "title": "Step 1A" }, "source": [ "Action captures the following output:
\n", "expiring_users
This action lists the usernames of expiring Access Keys using the output from Step 2.
" ] }, { "cell_type": "code", "execution_count": 21, "id": "3828def9-f4b1-4e75-9f1b-6b70fed35ae8", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-08T10:36:03.614Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Object of Expiring Users", "orderProperties": [], "tags": [], "title": "Create List of Object of Expiring Users" }, "outputs": [], "source": [ "all_expiring_users = []\n", "if expiring_users[0] == False:\n", " if len(expiring_users[1])!=0:\n", " all_expiring_users=expiring_users[1]\n", "print(all_expiring_users)" ] }, { "cell_type": "markdown", "id": "e1956e5a-c097-4dd7-a0da-ae45fc98c4db", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 1B", "orderProperties": [], "tags": [], "title": "Step 1B" }, "source": [ "This action simply creates another list containing a dictionary of the user and their old access key. The output from this acion is required for Step 4 and Step 5.
" ] }, { "cell_type": "code", "execution_count": 22, "id": "a407845b-41f9-4ca5-9387-a2cfb0e6e46f", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-08T10:36:12.088Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Expiring Users", "orderProperties": [], "tags": [], "title": "Create List of Expiring Users" }, "outputs": [], "source": [ "expiring_usernames = []\n", "for each_user in all_expiring_users:\n", " for k,v in each_user.items():\n", " if k=='username':\n", " expiring_usernames.append(v)\n", "print(expiring_usernames)\n", "task.configure(outputName=\"expiring_usernames\")" ] }, { "cell_type": "markdown", "id": "b3a132e5-42c9-46a2-9788-9f8648dc71f6", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "Using unSkript's AWS Create Access Key action we will create a new Access Key for the users from Step 2.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "f75a8726-d158-4afd-a667-0abd6f9717dc", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_IAM", "CATEGORY_TYPE_IAM" ], "actionDescription": "Create a new Access Key for the User", "actionEntryFunction": "aws_create_access_key", "actionIsCheck": false, "actionIsRemediation": true, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Create Access Key", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "492b9b6807e5254512715555e3ec52a97e006c04a28511710e5bc1b0c45ffdd7", "collapsed": true, "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Create a new Access Key for the User", "id": 1, "index": 1, "inputData": [ { "aws_username": { "constant": false, "value": "iter_item" } } ], "inputschema": [ { "properties": { "aws_username": { "description": "Username of the IAM User", "title": "Username", "type": "string" } }, "required": [ "aws_username" ], "title": "aws_create_access_key", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "aws_username", "iter_list": { "constant": false, "objectItems": false, "value": "expiring_usernames" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Create Access Key", "orderProperties": [ "aws_username" ], "outputParams": { "output_name": "new_access_keys", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "len(expiring_usernames)!=0", "tags": [ "aws_create_access_key" ], "uuid": 
"492b9b6807e5254512715555e3ec52a97e006c04a28511710e5bc1b0c45ffdd7", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import pprint\n", "from typing import Dict\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_create_access_key_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_create_access_key(\n", " handle,\n", " aws_username: str\n", ") -> Dict:\n", " \"\"\"aws_create_access_key creates a new access key for the given user.\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :type aws_username: str\n", " :param aws_username: Username of the IAM user to be looked up\n", "\n", " :rtype: Result Dictionary of result\n", " \"\"\"\n", " iamClient = handle.client('iam')\n", " result = iamClient.create_access_key(UserName=aws_username)\n", " retVal = {}\n", " temp_list = []\n", " for key, value in result.items():\n", " if key not in temp_list:\n", " temp_list.append(key)\n", " retVal[key] = value\n", " return retVal\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"aws_username\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"expiring_usernames\",\n", " \"iter_parameter\": \"aws_username\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(expiring_usernames)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"new_access_keys\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_create_access_key, 
lego_printer=aws_create_access_key_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "e6797aa7-a0c2-4842-8482-da22a5363fe8", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 3", "orderProperties": [], "tags": [], "title": "Step 3" }, "source": [ "Action takes the following parameters:
\n", "aws_username
Using the AWS Update Access Key action we will update the status of the old Access Key to \"Inactive\". This step is required to delete the old access key as one user cannot have two Access Keys.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "aef7b261-1f5a-4402-ae02-22841fc4569b", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_IAM" ], "actionDescription": "Update status of the Access Key", "actionEntryFunction": "aws_update_access_key", "actionIsCheck": false, "actionIsRemediation": true, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Update Access Key", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "0297f6c80f0416d10484fa2593510515eef2900add97924e3e73beaab5fea819", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Update status of the Access Key", "id": 1, "index": 1, "inputData": [ { "aws_access_key_id": { "constant": false, "value": "\"iter.get(\\\\\"access_key_id\\\\\")\"" }, "aws_username": { "constant": false, "value": "\"iter.get(\\\\\"username\\\\\")\"" }, "status": { "constant": true, "value": "Inactive" } } ], "inputschema": [ { "definitions": { "AccessKeyStatus": { "description": "An enumeration.", "enum": [ "Active", "Inactive" ], "title": "AccessKeyStatus", "type": "string" } }, "properties": { "aws_access_key_id": { "description": "Old Access Key ID of the User", "title": "Access Key ID", "type": "string" }, "aws_username": { "description": "Username of the IAM User", "title": "Username", "type": "string" }, "status": { "allOf": [ { "$ref": "#/definitions/AccessKeyStatus" } ], "description": "Status to set for the Access Key", "title": "Status", "type": "enum" } }, "required": [ "aws_username", "aws_access_key_id", "status" ], "title": "aws_update_access_key", "type": "object" } ], "iterData": 
[ { "iter_enabled": true, "iter_item": { "aws_access_key_id": "access_key_id", "aws_username": "username" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_expiring_users" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Update Access Key", "orderProperties": [ "aws_username", "aws_access_key_id", "status" ], "printOutput": true, "startcondition": "len(all_expiring_users)!=0", "tags": [ "aws_update_access_key" ], "uuid": "0297f6c80f0416d10484fa2593510515eef2900add97924e3e73beaab5fea819", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import pprint\n", "from typing import Dict\n", "from pydantic import BaseModel, Field\n", "from unskript.enums.aws_access_key_enums import AccessKeyStatus\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_update_access_key_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(\"Access Key status successfully changed\")\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_update_access_key(\n", " handle,\n", " aws_username: str,\n", " aws_access_key_id: str,\n", " status: AccessKeyStatus\n", ") -> Dict:\n", " \"\"\"aws_update_access_key updates the status of an access key to Inactive/Active\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :type aws_username: str\n", " :param aws_username: Username of the IAM user to be looked up\n", "\n", " :type aws_access_key_id: str\n", " :param aws_access_key_id: Old Access Key ID of the user of which the status\n", " needs to be updated\n", "\n", " :type status: AccessKeyStatus\n", " :param status: Status to set for the Access Key\n", "\n", " :rtype: Result Dictionary of result\n", " \"\"\"\n", " iamClient = handle.client('iam')\n", " result = iamClient.update_access_key(\n", " UserName=aws_username,\n", 
" AccessKeyId=aws_access_key_id,\n", " Status=status\n", " )\n", " retVal = {}\n", " temp_list = []\n", " for key, value in result.items():\n", " if key not in temp_list:\n", " temp_list.append(key)\n", " retVal[key] = value\n", " return retVal\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=False)\n", "task.configure(inputParamsJson='''{\n", " \"aws_username\": \"\\\\\"iter.get(\\\\\\\\\\\\\"username\\\\\\\\\\\\\")\\\\\"\",\n", " \"aws_access_key_id\": \"\\\\\"iter.get(\\\\\\\\\\\\\"access_key_id\\\\\\\\\\\\\")\\\\\"\",\n", " \"status\": \"AccessKeyStatus.Inactive\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_expiring_users\",\n", " \"iter_parameter\": [\"aws_access_key_id\",\"aws_username\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_expiring_users)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_update_access_key, lego_printer=aws_update_access_key_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "aa81394c-749e-4b32-bf6c-a866369f2cf5", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 4", "orderProperties": [], "tags": [], "title": "Step 4" }, "source": [ "This action takes the following parameters:
\n", "aws_username,aws_access_key_idandstatus
Finally, we will delete the the old (Inactive) Access Key for the IAM Users
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "e12a42d2-1eb8-4737-b0d7-4dd80c688fca", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_IAM", "CATEGORY_TYPE_IAM" ], "actionDescription": "Delete an Access Key for a User", "actionEntryFunction": "aws_delete_access_key", "actionIsCheck": false, "actionIsRemediation": true, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Delete Access Key", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "4ce21d2ac0824cafdddbb4245ffcb1d4c34786ed68c075fb1041eb8c7e22f01d", "collapsed": true, "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Delete an Access Key for a User", "id": 1, "index": 1, "inputData": [ { "aws_access_key_id": { "constant": false, "value": "\"iter.get(\\\\\"access_key_id\\\\\")\"" }, "aws_username": { "constant": false, "value": "\"iter.get(\\\\\"username\\\\\")\"" } } ], "inputschema": [ { "properties": { "aws_access_key_id": { "description": "Old Access Key ID of the User", "title": "Access Key ID", "type": "string" }, "aws_username": { "description": "Username of the IAM User", "title": "Username", "type": "string" } }, "required": [ "aws_username", "aws_access_key_id" ], "title": "aws_delete_access_key", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "aws_access_key_id": "access_key_id", "aws_username": "username" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_expiring_users" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Delete Access Key", 
"orderProperties": [ "aws_username", "aws_access_key_id" ], "printOutput": true, "startcondition": "len(all_expiring_users)!=0", "tags": [ "aws_delete_access_key" ], "uuid": "4ce21d2ac0824cafdddbb4245ffcb1d4c34786ed68c075fb1041eb8c7e22f01d", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import pprint\n", "from typing import Dict\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_access_key_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(\"Access Key successfully deleted\")\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_delete_access_key(\n", " handle,\n", " aws_username: str,\n", " aws_access_key_id: str,\n", ") -> Dict:\n", " \"\"\"aws_delete_access_key deleted the given access key.\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :type aws_username: str\n", " :param aws_username: Username of the IAM user to be looked up\n", "\n", " :type aws_access_key_id: str\n", " :param aws_access_key_id: Old Access Key ID of the user which needs to be deleted\n", "\n", " :rtype: Result Status Dictionary of result\n", " \"\"\"\n", " iamClient = handle.client('iam')\n", " result = iamClient.delete_access_key(UserName=aws_username, AccessKeyId=aws_access_key_id)\n", " retVal = {}\n", " temp_list = []\n", " for key, value in result.items():\n", " if key not in temp_list:\n", " temp_list.append(key)\n", " retVal[key] = value\n", " return retVal\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"aws_username\": \"iter.get(\\\\\"username\\\\\")\",\n", " \"aws_access_key_id\": \"iter.get(\\\\\"access_key_id\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": 
\"all_expiring_users\",\n", " \"iter_parameter\": [\"aws_username\",\"aws_access_key_id\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_expiring_users)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_access_key, lego_printer=aws_delete_access_key_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "d87557cc-2feb-47ce-89f2-5ee1d7375c88", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "aws_usernameandaws_access_key_id
In this Runbook, we were able to perform AWS Access Key rotation for IAM users whose Access Keys were expiring by using unSkript's AWS actions. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "AWS Access Key Rotation for IAM users", "parameters": [ "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1169)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "threshold_days": { "description": "Threshold number of days to check if an access key has expired. Eg: 45", "title": "threshold_days", "type": "number" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Access_Key_Rotation.json ================================================ { "name": "AWS Access Key Rotation for IAM users", "description": "This runbook can be used to configure AWS Access Key rotation. Changing access keys (which consist of an access key ID and a secret access key) on a regular schedule is a well-known security best practice because it shortens the period an access key is active and therefore reduces the business impact if they are compromised. 
Having an established process that is run regularly also ensures the operational steps around key rotation are verified, so changing a key is never a scary step.", "uuid": "a79201f821993867e23dd9603ed7ef5123325353d717c566f902f7ca6e471f5c", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Add_Lifecycle_Policy_To_S3_Buckets.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "5424264e-6195-4cf9-906b-24b02d5a83f3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Find S3 Buckets without Lifecycle Policies
2) Attach Lifecycle Policy
Using Find AWS S3 buckets without lifecycle policies we can identify buckets that do not have any configured lifecycle rules for managing object lifecycle. By examining the presence or absence of lifecycle policies, you can gain insights into the data management practices of your S3 buckets. This information can be valuable for optimizing storage costs and ensuring efficient data lifecycle management.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": 18, "id": "cc2e3052-9a34-4e09-ab57-c868197a5f62", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_S3" ], "actionDescription": "S3 lifecycle policies enable you to automatically transition objects to different storage classes or delete them when they are no longer needed. This action finds all S3 buckets without lifecycle policies. ", "actionEntryFunction": "aws_find_s3_buckets_without_lifecycle_policies", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Find S3 Buckets without Lifecycle Policies", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "01cb410b7247b1803c9d41cfd23853bf405b7a603ef52a9d535ed675ed961909", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "S3 lifecycle policies enable you to automatically transition objects to different storage classes or delete them when they are no longer needed. This action finds all S3 buckets without lifecycle policies. 
", "execution_data": { "last_date_success_run_cell": "2023-05-16T08:46:51.456Z" }, "id": 4, "index": 4, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region of S3 buckets.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_find_s3_buckets_without_lifecycle_policies", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Find S3 Buckets without Lifecycle Policies", "orderProperties": [ "region" ], "outputParams": { "output_name": "buckets_without_policy", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not bucket_names", "tags": [ "aws_find_s3_buckets_without_lifecycle_policies" ], "title": "AWS Find S3 Buckets without Lifecycle Policies", "uuid": "01cb410b7247b1803c9d41cfd23853bf405b7a603ef52a9d535ed675ed961909", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from unskript.legos.aws.aws_get_s3_buckets.aws_get_s3_buckets import aws_get_s3_buckets\n", "from typing import List, Optional, Tuple\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_find_s3_buckets_without_lifecycle_policies_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_find_s3_buckets_without_lifecycle_policies(handle, region: str=\"\") -> Tuple:\n", " \"\"\"aws_find_s3_buckets_without_lifecycle_policies List all the S3 buckets without lifecycle policies\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " 
:type region: string\n", " :param region: AWS Region of the bucket\n", "\n", " :rtype: Status, List of all the S3 buckets without lifecycle policies with regions\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " s3Session = handle.resource(\"s3\", region_name=reg)\n", " response = aws_get_s3_buckets(handle, region=reg)\n", " for bucket in response:\n", " bucket_region = s3Session.meta.client.get_bucket_location(Bucket=bucket)['LocationConstraint']\n", " if bucket_region == None:\n", " bucket_region = 'us-east-1'\n", " if bucket_region != reg:\n", " continue\n", " bucket_lifecycle_configuration = s3Session.BucketLifecycleConfiguration(bucket)\n", " try:\n", " if bucket_lifecycle_configuration.rules:\n", " continue\n", " except Exception:\n", " bucket_details = {}\n", " bucket_details[\"bucket_name\"] = bucket\n", " bucket_details[\"region\"] = reg\n", " result.append(bucket_details)\n", " except Exception:\n", " pass\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not bucket_names\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"buckets_without_policy\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_find_s3_buckets_without_lifecycle_policies, lego_printer=aws_find_s3_buckets_without_lifecycle_policies_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "199591ef-cb3a-49b7-b515-3c6998050320", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": 
"Step 2A" }, "source": [ "This action captures the following output:
\n", "buckets_without_policy
This action gets the list of S3 buckets from the tuple output in Step 1.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": 19, "id": "6a10e980-9f17-4436-9166-90ea130aa316", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-16T08:46:53.557Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Buckets with No Lifecycle Policy", "orderProperties": [], "tags": [], "title": "Create List of Buckets with No Lifecycle Policy" }, "outputs": [], "source": [ "all_buckets_without_policy = []\n", "try:\n", " for res in buckets_without_policy:\n", " if type(res)==bool:\n", " if res == False:\n", " continue\n", " elif type(res)==list:\n", " if len(res)!=0:\n", " all_buckets_without_policy=res\n", "except Exception:\n", " for buck in bucket_names:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"bucket_name\"] = buck\n", " all_buckets_without_policy.append(data_dict)\n", "print(all_buckets_without_policy)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "978d3b61-2fd9-461d-89bd-534d2dcf3b63", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_buckets_without_policy
This action attaches a new lifecycle policy to the S3 buckets found in Step 1. From the listed input parameters, expiration_days and noncurrent_days have a default value of 30 days.
\n", "" ] }, { "cell_type": "code", "execution_count": 21, "id": "8fcae72a-d600-4a8a-b103-6fa0afade0f9", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_S3" ], "actionDescription": "Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration.", "actionEntryFunction": "aws_add_lifecycle_configuration_to_s3_bucket", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Add Lifecycle Configuration to AWS S3 Bucket", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "a55496e5f6dcbcdaeb22e734eea5363d34e60fa5c580b252ca16b022c0dbaf8f", "collapsed": true, "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration.", "execution_data": { "last_date_success_run_cell": "2023-05-16T08:47:36.364Z" }, "id": 1, "index": 1, "inputData": [ { "bucket_name": { "constant": false, "value": "\"iter.get(\\\\\"bucket_name\\\\\")\"" }, "expiration_days": { "constant": false, "value": "expiration_days" }, "noncurrent_days": { "constant": false, "value": "noncurrent_days" }, "prefix": { "constant": false, "value": "prefix" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "bucket_name": { "description": "The name of the bucket for which to set the configuration.", "title": "Bucket Name", "type": "string" }, "expiration_days": { "default": 30, "description": "Specifies the expiration for the lifecycle of the object in the form of days. 
Eg: 30 (days)", "title": "Expiration Days", "type": "number" }, "noncurrent_days": { "default": 30, "description": "Specifies the number of days an object is noncurrent before Amazon S3 permanently deletes the noncurrent object versions", "title": "Noncurrent Days", "type": "number" }, "prefix": { "default": "", "description": "Prefix identifying one or more objects to which the rule applies.", "title": "Prefix", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [ "region", "bucket_name" ], "title": "aws_add_lifecycle_configuration_to_s3_bucket", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "bucket_name": "bucket_name", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_buckets_without_policy" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Add Lifecycle Configuration to AWS S3 Bucket", "orderProperties": [ "region", "bucket_name", "expiration_days", "prefix", "noncurrent_days" ], "printOutput": true, "startcondition": "len(all_buckets_without_policy)!=0", "tags": [ "aws_add_lifecycle_configuration_to_s3_bucket" ], "uuid": "a55496e5f6dcbcdaeb22e734eea5363d34e60fa5c580b252ca16b022c0dbaf8f", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Dict, Optional\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_add_lifecycle_configuration_to_s3_bucket_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "\n", "@beartype\n", "def aws_add_lifecycle_configuration_to_s3_bucket(handle, region: str, bucket_name:str, expiration_days:int=30, prefix:str='', noncurrent_days:int=30) -> Dict:\n", " 
\"\"\"aws_add_lifecycle_configuration_to_s3_bucket returns response of adding lifecycle configuration\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: location of the bucket\n", "\n", " :type bucket_name: string\n", " :param bucket_name: The name of the bucket for which to set the configuration.\n", "\n", " :type expiration_days: int\n", " :param expiration_days: Specifies the expiration for the lifecycle of the object in the form of days. Eg: 30 (days)\n", "\n", " :type prefix: string\n", " :param prefix: location of the bucket\n", "\n", " :type noncurrent_days: int\n", " :param noncurrent_days: Specifies the number of days an object is noncurrent before Amazon S3 permanently deletes the noncurrent object versions.\n", "\n", " :rtype: Dict of the response of adding lifecycle configuration\n", " \"\"\"\n", " s3Client = handle.client(\"s3\", region_name=region)\n", " try:\n", " lifecycle_config = {\n", " 'Rules': [\n", " {\n", " 'Expiration': {\n", " 'Days': expiration_days,\n", " },\n", " 'Filter': {\n", " 'Prefix': ''\n", " },\n", " 'Status': 'Enabled',\n", " 'NoncurrentVersionExpiration': {\n", " 'NoncurrentDays': noncurrent_days\n", " }\n", " }\n", " ]\n", " }\n", " bucket_name = 'testrunbook'\n", " response = s3Client.put_bucket_lifecycle_configuration(\n", " Bucket=bucket_name,\n", " LifecycleConfiguration=lifecycle_config\n", " )\n", " except Exception as e:\n", " raise e\n", " return response\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"expiration_days\": \"int(expiration_days)\",\n", " \"prefix\": \"prefix\",\n", " \"noncurrent_days\": \"int(noncurrent_days)\",\n", " \"bucket_name\": \"iter.get(\\\\\"bucket_name\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " 
\"iter_list_is_const\": false,\n", " \"iter_list\": \"all_buckets_without_policy\",\n", " \"iter_parameter\": [\"bucket_name\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_buckets_without_policy)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_add_lifecycle_configuration_to_s3_bucket, lego_printer=aws_add_lifecycle_configuration_to_s3_bucket_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "44a6cf05-385b-445d-a503-ad4aa607a568", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "bucket_name, region, expiration_days, prefix, nonconcurrent_days
In this Runbook, we were able to find AWS S3 buckets without lifecycle policies and attach one to them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Add Lifecycle Policy to S3 Buckets", "parameters": [ "region", "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "bucket_names": { "description": "List of S3 buckets to attach the lifecycle policy to.", "title": "bucket_names", "type": "array" }, "expiration_days": { "default": 30, "description": "Specifies the expiration of the lifecycle of the S3 bucker. By default it is considered to be 30 days. ", "title": "expiration_days", "type": "number" }, "noncurrent_days": { "default": 30, "description": "Specifies the transition rule for the lifecycle rule that describes when noncurrent objects transition to a specific storage class.", "title": "noncurrent_days", "type": "number" }, "prefix": { "default": " ", "description": "Prefix identifying one or more of the rules that applies to the object", "title": "prefix", "type": "string" }, "region": { "description": "AWS region to find the S3 buckets", "title": "region", "type": "string" } }, "required": [ "region" ], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Add_Lifecycle_Policy_To_S3_Buckets.json ================================================ { "name": "Add Lifecycle Policy to S3 Buckets", "description": "Attaching lifecycle policies to AWS S3 buckets enables us to automate the management of object 
lifecycle in your storage buckets. By configuring lifecycle policies, you can define rules that determine the actions to be taken on objects based on their age or other criteria. This includes transitioning objects to different storage classes, such as moving infrequently accessed data to lower-cost storage tiers or archiving them to Glacier, as well as setting expiration dates for objects. By attaching lifecycle policies to your S3 buckets, you can optimize storage costs by automatically moving data to the most cost-effective storage tier based on its lifecycle. Additionally, it allows you to efficiently manage data retention and comply with regulatory requirements or business policies regarding data expiration. This runbook helps us find all the buckets without any lifecycle policy and attach one to them.", "uuid": "3d74913836e037a001f718b48f1e19010394b90afc2422d0572ab5c515521075", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Add_Mandatory_tags_to_EC2.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "79251bc7-c6cd-4344-a8d5-754bf62eb17e", "metadata": { "jupyter": { "source_hidden": false }, "name": "Enforce Mandatory Tags Across All AWS Resources", "orderProperties": [], "tags": [], "title": "Enforce Mandatory Tags Across All AWS Resources" }, "source": [ "![]()
\n", "
![]()
\n", "
\n", "
1) Get AWS EBS Volume Without GP3 Type
2) Modify EBS Volume to GP3
Using unSkript's Get AWS EBS Volume Without GP3 Type action we will fetch all the EBS Volumes that are not of General Purpose Type-3.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region(Optional)
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "bc4e94de-bb36-4db2-8017-ab96ae205959", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS" ], "actionDescription": "AWS recently introduced the General Purpose SSD (gp3) volume type, which is designed to provide higher IOPS performance at a lower cost than the gp2 volume type.", "actionEntryFunction": "aws_get_ebs_volumes_without_gp3_type", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "2475714639442a9adcd0a87f7d193f6e8a6bbb9537d1eb6b03a6befb8ef84b19" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Get AWS EBS Volume Without GP3 Type", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "ef1a281f6f6d0f44406bc73758705fd814b740952f9a82a2735d8db6fb6d834f", "collapsed": true, "condition_enabled": true, "continueOnError": false, "description": "AWS recently introduced the General Purpose SSD (gp3) volume type, which is designed to provide higher IOPS performance at a lower cost than the gp2 volume type.", "id": 4, "index": 4, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_get_ebs_volumes_without_gp3_type", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Get AWS EBS Volume Without GP3 Type", "orderProperties": [ "region" ], "outputParams": { "output_name": "all_volumes", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not 
ebs_volume_ids", "tags": [ "aws_get_ebs_volumes_without_gp3_type" ], "uuid": "ef1a281f6f6d0f44406bc73758705fd814b740952f9a82a2735d8db6fb6d834f", "version": "1.0.0", "credentialsJson": {} }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "import pprint\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_ebs_volumes_without_gp3_type_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_ebs_volumes_without_gp3_type(handle, region: str = \"\") -> Tuple:\n", " \"\"\"aws_get_ebs_volumes_without_gp3_type Returns an array of ebs volumes.\n", "\n", " :type region: string\n", " :param region: Used to filter the volume for specific region.\n", "\n", " :rtype: Tuple with status result and list of EBS Volume without GP3 type.\n", " \"\"\"\n", " result=[]\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " # Filtering the volume by region\n", " ec2Client = handle.resource('ec2', region_name=reg)\n", " volumes = ec2Client.volumes.all()\n", "\n", " # collecting the volumes which has zero attachments\n", " for volume in volumes:\n", " volume_dict = {}\n", " if volume.volume_type != \"gp3\":\n", " volume_dict[\"region\"] = reg\n", " volume_dict[\"volume_id\"] = volume.id\n", " volume_dict[\"volume_type\"] = volume.volume_type\n", " result.append(volume_dict)\n", " except Exception:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", 
"task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not ebs_volume_ids\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"all_volumes\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_ebs_volumes_without_gp3_type, lego_printer=aws_get_ebs_volumes_without_gp3_type_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "da849c25-f2f5-45b0-9502-33e35a7e54a5", "metadata": { "jupyter": { "source_hidden": false }, "name": "Stap 1 Extension", "orderProperties": [], "tags": [], "title": "Stap 1 Extension" }, "source": [ "This action captures the following output:
\n", "all_volumes
This action filters regions that have no EBS volumes without gp3 type.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "b1284613-b251-4ba3-83a8-db49cfb3bcab", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-18T13:26:21.555Z" }, "name": "Create List of Volumes without GP3 Type", "orderProperties": [], "tags": [], "title": "Create List of Volumes without GP3 Type" }, "outputs": [], "source": [ "all_non_gp3_volumes = []\n", "dummy = []\n", "try:\n", " if all_volumes[0] == False:\n", " for volume in all_volumes[1]:\n", " all_non_gp3_volumes.append(volume)\n", "except Exception as e:\n", " if ebs_volume_ids:\n", " for vol_id in ebs_volume_ids:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"volume_id\"] = vol_id\n", " all_non_gp3_volumes.append(data_dict)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "17285759-9cfa-4966-9354-4ff9342b2bd2", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_non_gp3_volumes
Using unSkript's Modify EBS Volume to GP3 action we will modify the EBS volume type to GP3.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "25b754cf-7a86-43e6-8727-b66434953158", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2" ], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "05bba1d41c46a68afc0f11b423dc140bd431315f52489b334d00ff3a938205ba", "checkEnabled": false, "collapsed": true, "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "AWS recently introduced the General Purpose SSD (gp3) volume type, which is designed to provide higher IOPS performance at a lower cost than the gp2 volume type.", "id": 347, "index": 347, "inputData": [ { "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" }, "volume_id": { "constant": false, "value": "\"iter.get(\\\\\"volume_id\\\\\")\"" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "volume_id": { "description": "EBS Volume ID.", "title": "Volume ID", "type": "string" } }, "required": [ "region", "volume_id" ], "title": "aws_modify_ebs_volume_to_gp3", "type": "object" } ], "isUnskript": false, "iterData": [ { "iter_enabled": true, "iter_item": { "region": "region", "volume_id": "volume_id" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_non_gp3_volumes" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Modify EBS Volume to GP3", "nouns": [], "orderProperties": [ "region", "volume_id" ], "output": { "type": "" }, "outputParams": { "output_name": 
"modified_volumes", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "len(all_non_gp3_volumes)!=0", "tags": [], "title": "AWS Modify EBS Volume to GP3", "verbs": [] }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_modify_ebs_volume_to_gp3_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_modify_ebs_volume_to_gp3(handle, region: str, volume_id: str) -> List:\n", " \"\"\"aws_modify_ebs_volume_to_gp3 returns an array of modified details for EBS volumes.\n", "\n", " :type region: string\n", " :param region: Used to filter the volume for specific region.\n", "\n", " :type volume_id: string\n", " :param volume_id: EBS Volume ID.\n", "\n", " :rtype: List of modified details for EBS volumes\n", " \"\"\"\n", " result = []\n", " try:\n", " ec2Client = handle.client('ec2', region_name=region)\n", " volumes = ec2Client.modify_volume(VolumeId=volume_id, VolumeType='gp3')\n", " result.append(volumes)\n", " except Exception as e:\n", " result.append({\"error\": e})\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"volume_id\": \"iter.get(\\\\\"volume_id\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_non_gp3_volumes\",\n", " \"iter_parameter\": [\"region\",\"volume_id\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_non_gp3_volumes)!=0\",\n", " \"condition_result\": 
true\n", " }''')\n", "task.configure(outputName=\"modified_volumes\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_modify_ebs_volume_to_gp3, lego_printer=aws_modify_ebs_volume_to_gp3_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "05396b66-dec6-4132-ac6c-49d5deefa68b", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "region,volume_type, volume_id
In this Runbook, we were able to change the type of those EBS volumes that weren't type GP3 to type GP3 by using unSkript's AWS actions. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Change AWS EBS Volume To GP3 Type", "parameters": [ "ebs_volume_ids", "ebs_volume_type", "region" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "ebs_volume_ids": { "description": "List of EBS volume ID's ", "title": "ebs_volume_ids", "type": "array" }, "region": { "description": "AWS Region(s) to get EBS volumes. Eg: us-west-2", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Change_EBS_Volume_To_GP3_Type.json ================================================ { "name": "Change AWS EBS Volume To GP3 Type", "description": "This runbook can be used to change the type of an EBS volume to GP3(General Purpose 3). GP3 type volume has a number of advantages over it's predecessors. 
gp3 volumes are ideal for a wide variety of applications that require high performance at low cost", "uuid": "2475714639442a9adcd0a87f7d193f6e8a6bbb9537d1eb6b03a6befb8ef84b19", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Change_Route53_TTL.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "82eebdfd-c880-40df-bd6d-5b546c92164b", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Get TTL under X hours
2) Change the TTL value
Using unSkript's Get Route53 TTL Under Hours, we will find the hosted zones and records that have a TTL under the given threshold hours. A lower TTL means more queries arrive at the name servers because the cached values expire sooner.
\n", "\n", "\n", "This action takes the following parameters:
\n", "threshold(in hours)
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "8372128f-d195-47f3-b3a7-4482ae7e9764", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ROUTE53" ], "actionDescription": "AWS: Check for short Route 53 TTL", "actionEntryFunction": "aws_get_ttl_under_given_hours", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS: Check for short Route 53 TTL", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "a885ef62f7614e282856fdc37f0654f67b4ec7e7651350ea0dbb123788e705df", "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "AWS: Check for short Route 53 TTL", "execution_data": { "last_date_success_run_cell": "2023-05-18T08:06:27.645Z" }, "id": 2, "index": 2, "inputData": [ { "threshold": { "constant": false, "value": "int(threshold_ttl)" } } ], "inputschema": [ { "properties": { "threshold": { "default": 1, "description": "(In hours) A threshold in hours to verify route 53 TTL is within the threshold.", "title": "Threshold (In hours)", "type": "integer" } }, "required": [], "title": "aws_get_ttl_under_given_hours", "type": "object" } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS: Check for short Route 53 TTL", "orderProperties": [ "threshold" ], "outputParams": { "output_name": "lower_ttl_records", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not hosted_zone_id and not record_name and not record_type", "tags": [ "aws_get_ttl_under_given_hours" ], "uuid": 
"a885ef62f7614e282856fdc37f0654f67b4ec7e7651350ea0dbb123788e705df", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Tuple, Optional\n", "import pprint\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_get_ttl_for_route53_records.aws_get_ttl_for_route53_records import aws_get_ttl_for_route53_records\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_ttl_under_given_hours_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_ttl_under_given_hours(handle, threshold: int = 1) -> Tuple:\n", " \"\"\"aws_get_ttl_under_x_hours Returns TTL for records in a hosted zone\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method.\n", "\n", " :type threshold: str\n", " :param threshold: (In hours) A threshold in hours to verify route 53 TTL is within the threshold.\n", "\n", " :rtype: List of details with the record type, record name and record TTL.\n", " \"\"\"\n", " result = []\n", " try:\n", " route_client = handle.client('route53')\n", " seconds = threshold * 3600\n", " hosted_zones = aws_get_paginator(route_client, \"list_hosted_zones\", \"HostedZones\")\n", " for zone in hosted_zones:\n", " record_ttl_data = aws_get_ttl_for_route53_records(handle, zone['Id'])\n", " for record_ttl in record_ttl_data:\n", " if isinstance(record_ttl['record_ttl'], str):\n", " continue\n", " elif record_ttl['record_ttl'] < seconds:\n", " records = {}\n", " records[\"hosted_zone_id\"] = zone['Id']\n", " records[\"record_name\"] = record_ttl['record_name']\n", " records[\"record_type\"] = record_ttl['record_type']\n", " records[\"record_ttl\"] = record_ttl['record_ttl']\n", " result.append(records)\n", " except Exception as e:\n", " pass\n", "\n", " if len(result) 
!= 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"threshold\": \"int(threshold_ttl)\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not hosted_zone_id and not record_name and not record_type\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"lower_ttl_records\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_ttl_under_given_hours, lego_printer=aws_get_ttl_under_given_hours_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "a311041f-620a-4b6b-914f-e52c6c3a71f4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "lower_ttl_records
This action filters the output from Step 1 to get the non-empty values
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "b85ce542-bdf0-44d2-9e75-213002d5c036", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-18T08:06:39.524Z" }, "jupyter": { "source_hidden": true }, "name": "Create List of Lower TTL records", "orderProperties": [], "tags": [], "title": "Create List of Lower TTL records" }, "outputs": [], "source": [ "# print(lower_ttl_records)\n", "all_lower_ttl_records = []\n", "try:\n", " if lower_ttl_records[0] == False:\n", " if len(lower_ttl_records[1])!=0:\n", " all_lower_ttl_records=lower_ttl_records[1]\n", "except Exception:\n", " data_dict = {}\n", " data_dict[\"hosted_zone_id\"] = hosted_zone_id\n", " data_dict[\"record_name\"] = record_name\n", " data_dict[\"record_type\"] = record_type\n", " all_lower_ttl_records.append(data_dict)\n", "print(all_lower_ttl_records)" ] }, { "cell_type": "markdown", "id": "9fb3704a-9b19-49c4-96ab-a982217bbcd3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_lower_ttl_records
This action changes the TTL value for a record that has a lower value to a higher one. By default, 86400 seconds is used if no value is given.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "70e72194-c276-4f44-a9a9-d90b37488a94", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ROUTE53" ], "actionDescription": "Update TTL for an existing record in a hosted zone.", "actionEntryFunction": "aws_update_ttl_for_route53_records", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Update TTL for Route53 Record", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "a79077024e9d76970c61eb8d40f26129820fbed3cbec6b03e5610dbace0d2224", "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Update TTL for an existing record in a hosted zone.", "id": 1, "index": 1, "inputData": [ { "hosted_zone_id": { "constant": false, "value": "\"iter.get(\\\\\"hosted_zone_id\\\\\")\"" }, "new_ttl": { "constant": false, "value": "int(new_ttl)" }, "record_name": { "constant": false, "value": "\"iter.get(\\\\\"record_name\\\\\")\"" }, "record_type": { "constant": false, "value": "\"iter.get(\\\\\"record_type\\\\\")\"" } } ], "inputschema": [ { "definitions": { "Route53RecordType": { "description": "An enumeration.", "enum": [ "A", "AAAA", "CAA", "CNAME", "DS", "MX", "NAPTR", "NS", "PTR", "SOA", "SPF", "SRV", "TXT" ], "title": "Route53RecordType", "type": "string" } }, "properties": { "hosted_zone_id": { "description": "ID of the hosted zone in Route53", "title": "Hosted Zone ID", "type": "string" }, "new_ttl": { "description": "New TTL value for a record. Eg: 300", "title": "New TTL", "type": "integer" }, "record_name": { "description": "Name of record in a hosted zone. 
Eg: example.com", "title": "Record Name", "type": "string" }, "record_type": { "allOf": [ { "$ref": "#/definitions/Route53RecordType" } ], "description": "Record Type of the record.", "title": "Record Type", "type": "enum" } }, "required": [ "hosted_zone_id", "new_ttl", "record_name", "record_type" ], "title": "aws_update_ttl_for_route53_records", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "hosted_zone_id": "hosted_zone_id", "record_name": "record_name", "record_type": "record_type" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_lower_ttl_records" } } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Update TTL for Route53 Record", "orderProperties": [ "hosted_zone_id", "new_ttl", "record_name", "record_type" ], "printOutput": true, "startcondition": "len(all_lower_ttl_records)!=0", "tags": [ "aws_update_ttl_for_route53_records" ], "uuid": "a79077024e9d76970c61eb8d40f26129820fbed3cbec6b03e5610dbace0d2224", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from unskript.enums.aws_route53_record_type_enums import Route53RecordType\n", "from typing import Dict\n", "import pprint\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_update_ttl_for_route53_records_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def aws_update_ttl_for_route53_records(handle, hosted_zone_id: str, record_name: str, record_type:Route53RecordType, new_ttl:int) -> Dict:\n", " \"\"\"aws_update_ttl_for_route53_records updates the TTL for a Route53 record in a hosted zone.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) 
method.\n", "\n", " :type hosted_zone_id: string\n", " :param hosted_zone_id: ID of the hosted zone in Route53\n", "\n", " :type record_name: string\n", " :param record_name: Name of record in a hosted zone. Eg: example.com\n", "\n", " :type record_type: string\n", " :param record_type: Record Type of the record.\n", "\n", " :type new_ttl: int\n", " :param new_ttl: New TTL value for a record. Eg: 300\n", "\n", " :rtype: Response of updation on new TTL\n", " \"\"\"\n", "\n", " route53Client = handle.client('route53')\n", " new_ttl_value = int(new_ttl)\n", "\n", " response = route53Client.change_resource_record_sets(\n", " HostedZoneId=hosted_zone_id,\n", " ChangeBatch={\n", " 'Changes': [\n", " {\n", " 'Action': 'UPSERT',\n", " 'ResourceRecordSet': {\n", " 'Name': record_name,\n", " 'Type': record_type,\n", " 'TTL': new_ttl_value\n", " }\n", " }\n", " ]\n", " }\n", " )\n", " return response\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"hosted_zone_id\": \"\\\\\"iter.get(\\\\\\\\\\\\\"hosted_zone_id\\\\\\\\\\\\\")\\\\\"\",\n", " \"new_ttl\": \"int(new_ttl)\",\n", " \"record_name\": \"\\\\\"iter.get(\\\\\\\\\\\\\"record_name\\\\\\\\\\\\\")\\\\\"\",\n", " \"record_type\": \"\\\\\"iter.get(\\\\\\\\\\\\\"record_type\\\\\\\\\\\\\")\\\\\"\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_lower_ttl_records\",\n", " \"iter_parameter\": [\"hosted_zone_id\",\"record_name\",\"record_type\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_lower_ttl_records)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_update_ttl_for_route53_records, lego_printer=aws_update_ttl_for_route53_records_printer, hdl=hdl, args=args)" ] }, { 
"cell_type": "markdown", "id": "9c7430c8-3660-45bd-90ef-9ceab77e3daa", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "hosted_zone_id, record_name, record_type, new_ttl
In this Runbook, we were able to change the TTL (time to live) to a higher value. As a result, there are fewer queries received by the name servers, which will help in saving your AWS costs. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Change AWS Route53 TTL", "parameters": null }, "kernelspec": { "display_name": "unSkript (Build: 1169)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "hosted_zone_id": { "description": "The ID of the hosted zone that contains the resource record sets that you want to change.", "title": "hosted_zone_id", "type": "string" }, "new_ttl": { "default": 86400, "description": "New TTL value (in seconds) that needs to be updated for the records. ", "title": "new_ttl", "type": "number" }, "record_name": { "description": "Record name for a particular hosted zone.", "title": "record_name", "type": "string" }, "record_type": { "description": "Record type of the record name hosted in a particular zone", "enum": [ "A", "AAAA", "CAA", "CNAME ", "DS", "MX", "NAPTR", "NS", "PTR", "SOA", "SPF", "SRV" ], "enumNames": [ "A", "AAAA", "CAA", "CNAME ", "DS", "MX", "NAPTR", "NS", "PTR", "SOA", "SPF", "SRV" ], "title": "record_type", "type": "string" }, "threshold_ttl": { "default": 1, "description": "Threshold(in hours) to check if the TTL is lower than the given value. 
Eg: 1, checks for all records whose TTL is less than 3600 seconds (1 hour)", "title": "threshold_ttl", "type": "number" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Change_Route53_TTL.json ================================================ { "name": "Change AWS Route53 TTL", "description": "For a record in a hosted zone, lower TTL means that more queries arrive at the name servers because the cached values expire sooner. If you configure a higher TTL for your records, then the intermediate resolvers cache the records for longer time. As a result, there are fewer queries received by the name servers. This configuration reduces the charges corresponding to the DNS queries answered. However, higher TTL slows the propagation of record changes because the previous values are cached for longer periods. This Runbook can be used to configure a higher value of a TTL .", "uuid": "a0773e52a3a3a8688e47a9e10eba1c680913d28a9a8c4466113181534bd1f972", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Create_New_IAM_User_With_Policy.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "8a97b231-94d6-4e10-a24c-6eac9a4572e4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Add New IAM User", "orderProperties": [], "tags": [], "title": "Add New IAM User" }, "source": [ "Here we will use unSkript Create New IAM User action. This action creates an IAM user in AWS and assigns the given tag to the user.
\n", "\n", "\n", "Input parameters:
\n", "user_name,tag_key,tag_value
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "9fe78a10-d76f-4961-8e5c-bf381c5b3cc9", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "3f71dd060d5955f5dc9104dbaf418bf957b2222c510cb3afd09ded8e41e433d9", "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Create New IAM User", "id": 222, "index": 222, "inputData": [ { "tag_key": { "constant": false, "value": "tag_key" }, "tag_value": { "constant": false, "value": "tag_value" }, "user_name": { "constant": false, "value": "username" } } ], "inputschema": [ { "properties": { "tag_key": { "description": "Tag Key to new IAM User.", "title": "Tag Key", "type": "string" }, "tag_value": { "description": "Tag Value to new IAM User.", "title": "Tag Value", "type": "string" }, "user_name": { "description": "IAM User Name.", "title": "User Name", "type": "string" } }, "required": [ "user_name", "tag_key", "tag_value" ], "title": "aws_create_iam_user", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Create New IAM User", "nouns": [ "aws", "IAM", "user" ], "orderProperties": [ "user_name", "tag_key", "tag_value" ], "output": { "type": "" }, "outputParams": { "output_name": "user_details", "output_name_enabled": true }, "printOutput": true, "tags": [ "aws_create_iam_user" ], "title": "Create New IAM User", "verbs": [ "create" ] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import List, Dict\n", "from pydantic import BaseModel, Field\n", "from botocore.exceptions import ClientError\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_create_iam_user_printer(output):\n", " if output is None:\n", " 
return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_create_iam_user(handle, user_name: str, tag_key: str, tag_value: str) -> Dict:\n", " \"\"\"aws_create_iam_user Creates new IAM User.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method\n", "\n", " :type user_name: string\n", " :param user_name: Name of new IAM User.\n", "\n", " :type tag_key: string\n", " :param tag_key: Tag Key assign to new User.\n", "\n", " :type tag_value: string\n", " :param tag_value: Tag Value assign to new User.\n", "\n", " :rtype: Dict with the stopped instances state info.\n", " \"\"\"\n", "\n", " ec2Client = handle.client(\"iam\")\n", " result = {}\n", " try:\n", " response = ec2Client.create_user(\n", " UserName=user_name,\n", " Tags=[\n", " {\n", " 'Key': tag_key,\n", " 'Value': tag_value\n", " }])\n", " result = response\n", " except ClientError as error:\n", " if error.response['Error']['Code'] == 'EntityAlreadyExists':\n", " result = error.response\n", " else:\n", " result = error.response\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"tag_key\": \"tag_key\",\n", " \"tag_value\": \"tag_value\",\n", " \"user_name\": \"username\"\n", " }''')\n", "task.configure(outputName=\"user_details\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_create_iam_user, lego_printer=aws_create_iam_user_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "c174d638-f107-450f-ab2d-d28cf097a722", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable:
\n", "user_details
This action only executes when Step 1 successfully creates a user. In this action, we will pass the newly created username and temporary password, which will create a user profile for the user in AWS.
\n", "\n", "\n", "Input parameters:
\n", "user_name,password
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "35887cbc-bdb1-4f3b-8f59-a2bb78e9b605", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "7b52e5fdfddd113a1c489d95d5fd8c9a98043c6ea721588531db6a5261434975", "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Create Login profile for IAM User", "id": 166, "index": 166, "inputData": [ { "password": { "constant": false, "value": "password" }, "user_name": { "constant": false, "value": "username" } } ], "inputschema": [ { "properties": { "password": { "description": "Password for IAM User.", "title": "Password", "type": "string" }, "user_name": { "description": "IAM User Name.", "title": "User Name", "type": "string" } }, "required": [ "user_name", "password" ], "title": "aws_create_user_login_profile", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Create Login profile for IAM User", "nouns": [ "aws", "IAM", "login" ], "orderProperties": [ "user_name", "password" ], "output": { "type": "" }, "outputParams": { "output_name": "profile_details", "output_name_enabled": true }, "printOutput": true, "startcondition": "'User' in UserInfo", "tags": [ "aws_create_user_login_profile" ], "verbs": [ "create" ] }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Dict\n", "from botocore.exceptions import ClientError\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_create_user_login_profile_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def 
aws_create_user_login_profile(handle, user_name: str, password: str) -> Dict:\n", " \"\"\"aws_create_user_login_profile Create login profile for IAM User.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method.\n", "\n", " :type user_name: string\n", " :param user_name: Name of new IAM User.\n", "\n", " :type password: string\n", "\n", " :rtype: Dict with the Profile Creation status info.\n", " \"\"\"\n", "\n", " ec2Client = handle.client(\"iam\")\n", " result = {}\n", " try:\n", " response = ec2Client.create_login_profile(\n", " UserName=user_name,\n", " PasswordResetRequired=True)\n", "\n", " result = response\n", " except ClientError as error:\n", " if error.response['Error']['Code'] == 'EntityAlreadyExists':\n", " result = error.response\n", " else:\n", " result = error.response\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"password\": \"password\",\n", " \"user_name\": \"username\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"'User' in UserInfo\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"profile_details\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_create_user_login_profile, lego_printer=aws_create_user_login_profile_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "29511895-d1cc-4a01-9990-8928642b5006", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-3", "orderProperties": [], "tags": [], "title": "Step-3" }, "source": [ "Output variable:
\n", "profile_details
Here we will use unSkript's Get Caller Identity action. This action does not take any inputs and shows the caller's identity for the current user.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 5, "id": "dd1e1542-ddd7-4b86-86a2-17e999458fbd", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "afacde59-a401-4a8b-901d-46c4b3970b78", "createTime": "2022-07-27T16:51:48Z", "credentialsJson": {}, "currentVersion": "v0.0.0", "description": "Test", "execution_data": { "last_date_success_run_cell": "2022-09-02T16:44:27.574Z" }, "id": 100001, "index": 100001, "inputschema": [ { "properties": {}, "required": [ "instance_ids", "region" ], "title": "aws_restart_ec2_instances_test", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Get Caller Identity ", "nouns": [], "orderProperties": [], "output": { "type": "" }, "outputParams": { "output_name": "caller_details", "output_name_enabled": true }, "printOutput": true, "tags": [], "title": "Get Caller Identity ", "verbs": [] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import List, Dict\n", "from pydantic import BaseModel, Field\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_caller_identity_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_caller_identity(handle) -> Dict:\n", " ec2Client = handle.client('sts')\n", " response = ec2Client.get_caller_identity()\n", "\n", " return response\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(outputName=\"caller_details\")\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_caller_identity, lego_printer=aws_get_caller_identity_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", 
"id": "d1f05583-fa8c-4f8c-a357-3f6154df4620", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-4", "orderProperties": [], "tags": [], "title": "Step-4" }, "source": [ "Output variable:
\n", "caller_details
Here we will use unSkript's Post Slack Message action. This action sends a message to the Slack channel with the newly created username.
\n", "\n", "\n", "Input parameters:
\n", "channel,message
\n", "" ] }, { "cell_type": "code", "execution_count": 6, "id": "8cacd129-1fed-4c9e-9f2f-70da41c43c88", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "6a87f83ab0ecfeecb9c98d084e2b1066c26fa64be5b4928d5573a5d60299802d", "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Post Slack Message", "execution_data": { "last_date_success_run_cell": "2022-09-02T16:45:12.785Z" }, "id": 62, "index": 62, "inputData": [ { "channel": { "constant": false, "value": "channel" }, "message": { "constant": false, "value": "\"New IAM user {}\".format(user_name)" } } ], "inputschema": [ { "properties": { "channel": { "default": "", "description": "Name of the slack channel where the message to be posted", "title": "Channel", "type": "string" }, "message": { "default": "", "description": "Message to be sent", "title": "Message", "type": "string" } }, "required": [ "channel", "message" ], "title": "slack_post_message", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SLACK", "name": "Post Slack Message", "nouns": [ "slack", "message" ], "orderProperties": [ "channel", "message" ], "output": { "type": "" }, "outputParams": { "output_name": "send_status", "output_name_enabled": true }, "printOutput": true, "startcondition": "'User' in UserInfo and not channel", "tags": [ "slack_post_message" ], "verbs": [ "post" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "\n", "from pydantic import BaseModel, Field\n", "from slack_sdk import WebClient\n", "from slack_sdk.errors import SlackApiError\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "\n", "from beartype import beartype\n", 
"@beartype\n", "def slack_post_message_printer(output):\n", " if output is not None:\n", " pprint.pprint(output)\n", " else:\n", " return\n", "\n", "\n", "@beartype\n", "def slack_post_message(\n", " handle: WebClient,\n", " channel: str,\n", " message: str) -> str:\n", "\n", " try:\n", " response = handle.chat_postMessage(\n", " channel=channel,\n", " text=message)\n", " return f\"Successfuly Sent Message on Channel: #{channel}\"\n", " except SlackApiError as e:\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.response['error']}\")\n", " if e.response['error'] == 'channel_not_found':\n", " raise Exception('Channel Not Found')\n", " elif e.response['error'] == 'duplicate_channel_not_found':\n", " raise Exception('Channel associated with the message_id not valid')\n", " elif e.response['error'] == 'not_in_channel':\n", " raise Exception('Cannot post message to channel user is not in')\n", " elif e.response['error'] == 'is_archived':\n", " raise Exception('Channel has been archived')\n", " elif e.response['error'] == 'msg_too_long':\n", " raise Exception('Message text is too long')\n", " elif e.response['error'] == 'no_text':\n", " raise Exception('Message text was not provided')\n", " elif e.response['error'] == 'restricted_action':\n", " raise Exception('Workspace preference prevents user from posting')\n", " elif e.response['error'] == 'restricted_action_read_only_channel':\n", " raise Exception('Cannot Post message, read-only channel')\n", " elif e.response['error'] == 'team_access_not_granted':\n", " raise Exception('The token used is not granted access to the workspace')\n", " elif e.response['error'] == 'not_authed':\n", " raise Exception('No Authtnecition token provided')\n", " elif e.response['error'] == 'invalid_auth':\n", " raise Exception('Some aspect of Authentication cannot be validated. 
Request denied')\n", " elif e.response['error'] == 'access_denied':\n", " raise Exception('Access to a resource specified in the request denied')\n", " elif e.response['error'] == 'account_inactive':\n", " raise Exception('Authentication token is for a deleted user')\n", " elif e.response['error'] == 'token_revoked':\n", " raise Exception('Authentication token for a deleted user has been revoked')\n", " elif e.response['error'] == 'no_permission':\n", " raise Exception('The workspace toekn used does not have necessary permission to send message')\n", " elif e.response['error'] == 'ratelimited':\n", " raise Exception('The request has been ratelimited. Retry sending message later')\n", " elif e.response['error'] == 'service_unavailable':\n", " raise Exception('The service is temporarily unavailable')\n", " elif e.response['error'] == 'fatal_error':\n", " raise Exception('The server encountered catostrophic error while sending message')\n", " elif e.response['error'] == 'internal_error':\n", " raise Exception('The server could not complete operation, likely due to transietn issue')\n", " elif e.response['error'] == 'request_timeout':\n", " raise Exception('Sending message error via POST: either message was missing or truncated')\n", " else:\n", " raise Exception(f'Failed Sending Message to slack channel {channel} Error: {e.response[\"error\"]}')\n", "\n", " except Exception as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.__str__()}\")\n", " return f\"Unable to send message on {channel}\"\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"channel\": \"channel\",\n", " \"message\": \"\\\\\"New IAM user {}\\\\\".format(user_name)\"\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"'User' in UserInfo and not channel\",\n", " \"condition_result\": true\n", " 
}''')\n", "task.configure(outputName=\"send_status\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(slack_post_message, lego_printer=slack_post_message_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "e9df5398-15b1-4279-92b8-d4c62372afed", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "In this Runbook, we demonstrated the use of unSkript's AWS and slack actions to perform AWS create new IAM user, login profile and also show the caller identity of the user. On Success, post a message on the slack channel about the User creation. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io" ] } ], "metadata": { "execution_data": { "runbook_name": "Create IAM User with policy", "parameters": [ "channel", "password", "username" ] }, "kernelspec": { "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "channel": { "description": "Slack Channel Name to send the new User Information. 
Example random, general", "title": "channel", "type": "string" }, "password": { "description": "Login profile password for new IAM user.", "format": "password", "title": "password", "type": "string", "writeOnly": true }, "username": { "description": "Name of the user that needs to be created", "title": "username", "type": "string" } }, "required": [ "username", "password" ], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Create_New_IAM_User_With_Policy.json ================================================ { "name": "Create IAM User with policy", "description": "Create new IAM user with a security Policy. Sends confirmation to Slack.", "uuid": "1ce85aa2153d808bd95a21a4545c51f239696bc41f55d30b6849cd8218381ffc", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Delete_EBS_Volumes_Attached_To_Stopped_Instances.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "5424264e-6195-4cf9-906b-24b02d5a83f3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "Output variable:
\n", "send_status
\n", "
1) Get volumes for stopped instances
2) Delete EBS volumes
Using unSkript's Get Stopped Instances EBS volumes action, we will find volumes which are associated with stopped instances.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "3c1fe36b-eb58-4827-9c00-f6b03b8d7a4a", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2", "CATEGORY_TYPE_AWS_EBS" ], "actionDescription": "This action helps to list the volumes that are attached to stopped instances.", "actionEntryFunction": "aws_get_stopped_instance_volumes", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "a9d17f4c9feb963b6096290eedb21af43d89e803cdcb1238dc11a544a3071a1e" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Get Stopped Instance Volumes", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "fee89ce72c745afdc666dc59d1a4f29ca3419640824684151b9464e96d1e27a7", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "This action helps to list the volumes that are attached to stopped instances.", "execution_data": { "last_date_success_run_cell": "2023-05-17T16:47:59.430Z" }, "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_get_stopped_instance_volumes", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Get Stopped Instance Volumes", "orderProperties": [ "region" ], "outputParams": { "output_name": "stopped_instances_volumes", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not volume_ids", 
"tags": [], "uuid": "fee89ce72c745afdc666dc59d1a4f29ca3419640824684151b9464e96d1e27a7", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "import pprint\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_stopped_instance_volumes_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_stopped_instance_volumes(handle, region: str = \"\") -> Tuple:\n", " \"\"\"aws_get_stopped_instance_volumes Returns an array of volumes.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: Region to filter instances.\n", "\n", " :rtype: Array of volumes.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " ec2Client = handle.client('ec2', region_name=reg)\n", " res = aws_get_paginator(ec2Client, \"describe_instances\", \"Reservations\")\n", " for reservation in res:\n", " for instance in reservation['Instances']:\n", " if instance['State']['Name'] == 'stopped':\n", " block_device_mappings = instance['BlockDeviceMappings']\n", " for mapping in block_device_mappings:\n", " if 'Ebs' in mapping:\n", " ebs_volume = {}\n", " volume_id = mapping['Ebs']['VolumeId']\n", " ebs_volume[\"volume_id\"] = volume_id\n", " ebs_volume[\"region\"] = reg\n", " result.append(ebs_volume)\n", " except Exception:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " return (True, None)\n", "\n", "\n", "\n", "task = Task(Workflow())\n", 
"task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not volume_ids\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"stopped_instances_volumes\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_stopped_instance_volumes, lego_printer=aws_get_stopped_instance_volumes_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "199591ef-cb3a-49b7-b515-3c6998050320", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "stopped_instances_volumes
This action filters regions that have no volumes associated with stopped instances and creates a list of those that have them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "6a10e980-9f17-4436-9166-90ea130aa316", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-21T09:16:07.861Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Stopped Instance Volumes", "orderProperties": [], "tags": [], "title": "Create List of Stopped Instance Volumes" }, "outputs": [], "source": [ "all_stopped_instances_volumes = []\n", "try:\n", " if stopped_instances_volumes[0] == False:\n", " for instance in stopped_instances_volumes[1]:\n", " all_stopped_instances_volumes.append(instance)\n", "except Exception as e:\n", " if volume_ids:\n", " for vol_id in volume_ids:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"volume_id\"] = vol_id\n", " all_stopped_instances_volumes.append(data_dict)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "978d3b61-2fd9-461d-89bd-534d2dcf3b63", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_stopped_instances_volumes
This action deletes volumes found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "a48df07b-4723-4413-a1fa-19bfb08ba48e", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2" ], "actionDescription": "Delete AWS Volume by Volume ID", "actionEntryFunction": "aws_delete_volume_by_id", "actionIsCheck": false, "actionIsRemediation": true, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Delete AWS EBS Volume by Volume ID", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "e8cccc03e1af323982c0ab9f06c01127c0481ca81943eb7e82e46245140b1059", "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Delete AWS Volume by Volume ID", "id": 2, "index": 2, "inputData": [ { "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" }, "volume_id": { "constant": false, "value": "\"iter.get(\\\\\"volume_id\\\\\")\"" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "volume_id": { "description": "Volume ID.", "title": "Volume ID", "type": "string" } }, "required": [ "volume_id", "region" ], "title": "aws_delete_volume_by_id", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "region": "region", "volume_id": "volume_id" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_stopped_instances_volumes" } } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Delete AWS EBS Volume by Volume ID", "orderProperties": [ "volume_id", "region" ], "printOutput": true, "startcondition": "len(all_stopped_instances_volumes)!=0", "tags": [], 
"uuid": "e8cccc03e1af323982c0ab9f06c01127c0481ca81943eb7e82e46245140b1059", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2022 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_volume_by_id_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint({\"Output\": output})\n", "\n", "\n", "@beartype\n", "def aws_delete_volume_by_id(handle, volume_id: str, region: str) -> str:\n", " \"\"\"aws_filter_ebs_unattached_volumes Returns an array of ebs volumes.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method.\n", "\n", " :type region: string\n", " :param region: Used to filter the volume for specific region.\n", "\n", " :type volume_id: string\n", " :param volume_id: Volume ID needed to delete particular volume.\n", "\n", " :rtype: Result of the API in the List form.\n", " \"\"\"\n", " result = []\n", "\n", " ec2Client = handle.client('ec2',region_name=region)\n", "\n", " # Adding logic for deletion criteria\n", " try:\n", " response = ec2Client.delete_volume(VolumeId=volume_id,)\n", " result.append(response)\n", " except Exception as e:\n", " result.append(e)\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"volume_id\": \"iter.get(\\\\\"volume_id\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_stopped_instances_volumes\",\n", " \"iter_parameter\": [\"volume_id\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_stopped_instances_volumes)!=0\",\n", " 
\"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_volume_by_id, lego_printer=aws_delete_volume_by_id_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "44a6cf05-385b-445d-a503-ad4aa607a568", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "volume_id, region
In this Runbook, we were able to get EBS volumes attached to EC2 instances that have been stopped and delete them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete EBS Volume Attached to Stopped Instances", "parameters": [ "region", "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "region": { "description": "AWS Regions to get the EBS volumes from. e.g. us-west-2. If nothing is given all regions will be considered.", "title": "region", "type": "string" }, "volume_ids": { "description": "List of EBS Volume IDs.", "title": "volume_ids", "type": "array" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Delete_EBS_Volumes_Attached_To_Stopped_Instances.json ================================================ { "name": "Delete EBS Volume Attached to Stopped Instances", "description": "EBS (Elastic Block Storage) volumes are attached to EC2 Instances as storage devices. Unused (Unattached) EBS Volumes can keep accruing costs even when their associated EC2 instances are no longer running. These volumes need to be deleted if the instances they are attached to are no more required. 
This runbook helps us find such volumes and delete them.", "uuid": "a9d17f4c9feb963b6096290eedb21af43d89e803cdcb1238dc11a544a3071a1e", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Delete_EBS_Volumes_With_Low_Usage.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "5424264e-6195-4cf9-906b-24b02d5a83f3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Find EBS volumes with low usage
2) Delete EBS volumes
Using unSkript's Find EBS volumes with low usage action, we will find volumes with a low usage given a threshold number of days using the VolumeUsage metric in Cloudwatch metric statistics.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region, threshold_days
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "8cc5f039-fc8e-46ff-879b-977d6413e6df", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS" ], "actionDescription": "This action list low use volumes from AWS which used <10% capacity from the given threshold days.", "actionEntryFunction": "aws_get_ebs_volume_for_low_usage", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "c9e1563d58cd6e3778a6c3fb11643498e3cdf3965a18c09214423998d62847b8" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_OBJECT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Get EBS Volumes for Low Usage", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "c4fcaf0f517e1f7522cfa0f551857a760298211e4cb65a485df40e7770b8fbcd", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "This action list low use volumes from AWS which used <10% capacity from the given threshold days.", "id": 4, "index": 4, "inputData": [ { "region": { "constant": false, "value": "region" }, "threshold_days": { "constant": false, "value": "int(threshold)" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "Region", "type": "string" }, "threshold_days": { "default": 10, "description": "(in days)\u00a0The threshold to check the EBS volume usage less than the threshold.", "title": "Threshold (In days)", "type": "integer" } }, "required": [], "title": "aws_get_ebs_volume_for_low_usage", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Get EBS Volumes for Low Usage", "orderProperties": [ "region", "threshold_days" ], "outputParams": { "output_name": 
"low_usage_volumes", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not volume_ids", "tags": [ "aws_get_ebs_volume_for_low_usage" ], "uuid": "c4fcaf0f517e1f7522cfa0f551857a760298211e4cb65a485df40e7770b8fbcd", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "import pprint\n", "from typing import Optional, Tuple\n", "from datetime import datetime, timedelta\n", "from pydantic import BaseModel, Field\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_ebs_volume_for_low_usage_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_ebs_volume_for_low_usage(handle, region: str = \"\", threshold_days: int = 10) -> Tuple:\n", " \"\"\"aws_get_ebs_volume_for_low_usage Returns an array of ebs volumes.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type threshold_days: int\n", " :param threshold_days: (in days)\u00a0The threshold to check the EBS volume usage\n", " less than the threshold.\n", "\n", " :rtype: Tuple with status result and list of EBS Volume.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " # Filtering the volume by region\n", " ec2Client = handle.client('ec2', region_name=reg)\n", " response = aws_get_paginator(ec2Client, \"describe_volumes\", \"Volumes\")\n", " now = datetime.utcnow()\n", " days_ago = now - timedelta(days=threshold_days)\n", " # collecting the volumes which has zero attachments\n", " for volume in response:\n", " ebs_volume = {}\n", " volume_id = 
volume[\"VolumeId\"]\n", " cloudwatch = handle.client('cloudwatch', region_name=reg)\n", " cloudwatch_response = cloudwatch.get_metric_statistics(\n", " Namespace='AWS/EBS',\n", " MetricName='VolumeUsage',\n", " Dimensions=[\n", " {\n", " 'Name': 'VolumeId',\n", " 'Value': volume_id\n", " }\n", " ],\n", " StartTime=days_ago,\n", " EndTime=now,\n", " Period=3600,\n", " Statistics=['Average']\n", " )\n", " for v in cloudwatch_response['Datapoints']:\n", " if v['Average'] < 10:\n", " volume_ids = v['Dimensions'][0]['Value']\n", " ebs_volume[\"volume_id\"] = volume_ids\n", " ebs_volume[\"region\"] = reg\n", " result.append(ebs_volume)\n", " except Exception:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"threshold_days\": \"int(threshold)\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not volume_ids\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"low_usage_volumes\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_ebs_volume_for_low_usage, lego_printer=aws_get_ebs_volume_for_low_usage_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "199591ef-cb3a-49b7-b515-3c6998050320", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "low_usage_volumes
This action filters regions that have no low usage volumes and creates a list of those that have them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "6a10e980-9f17-4436-9166-90ea130aa316", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-20T17:26:22.391Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Low Usage Volumes", "orderProperties": [], "tags": [], "title": "Create List of Low Usage Volumes" }, "outputs": [], "source": [ "all_low_usage_volumes = []\n", "try:\n", " if low_usage_volumes[0] == False:\n", " if len(low_usage_volumes[1])!=0:\n", " all_low_usage_volumes=low_usage_volumes[1]\n", "except Exception:\n", " for vol_id in volume_ids:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"volume_id\"] = vol_id\n", " all_low_usage_volumes.append(data_dict)\n", "print(all_low_usage_volumes)" ] }, { "cell_type": "markdown", "id": "978d3b61-2fd9-461d-89bd-534d2dcf3b63", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_low_usage_volumes
This action deletes Low Usage Volumes found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "d6058839-742f-4456-872a-e8e7b42dd51b", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2" ], "actionDescription": "Delete AWS Volume by Volume ID", "actionEntryFunction": "aws_delete_volume_by_id", "actionIsCheck": false, "actionIsRemediation": true, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Delete AWS EBS Volume by Volume ID", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "e8cccc03e1af323982c0ab9f06c01127c0481ca81943eb7e82e46245140b1059", "collapsed": true, "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Delete AWS Volume by Volume ID", "id": 2, "index": 2, "inputData": [ { "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" }, "volume_id": { "constant": false, "value": "\"iter.get(\\\\\"volume_id\\\\\")\"" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "volume_id": { "description": "Volume ID.", "title": "Volume ID", "type": "string" } }, "required": [ "volume_id", "region" ], "title": "aws_delete_volume_by_id", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "region": "region", "volume_id": "volume_id" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_low_usage_volumes" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Delete AWS EBS Volume by Volume ID", "orderProperties": [ "volume_id", "region" ], "printOutput": true, "startcondition": 
"len(all_low_usage_volumes)!=0", "tags": [ "aws_delete_volume_by_id" ], "uuid": "e8cccc03e1af323982c0ab9f06c01127c0481ca81943eb7e82e46245140b1059", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2022 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_volume_by_id_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint({\"Output\": output})\n", "\n", "\n", "@beartype\n", "def aws_delete_volume_by_id(handle, volume_id: str, region: str) -> str:\n", " \"\"\"aws_filter_ebs_unattached_volumes Returns an array of ebs volumes.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method.\n", "\n", " :type region: string\n", " :param region: Used to filter the volume for specific region.\n", "\n", " :type volume_id: string\n", " :param volume_id: Volume ID needed to delete particular volume.\n", "\n", " :rtype: Result of the API in the List form.\n", " \"\"\"\n", " result = []\n", "\n", " ec2Client = handle.client('ec2',region_name=region)\n", "\n", " # Adding logic for deletion criteria\n", " try:\n", " response = ec2Client.delete_volume(VolumeId=volume_id,)\n", " result.append(response)\n", " except Exception as e:\n", " result.append(e)\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"volume_id\": \"iter.get(\\\\\"volume_id\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_low_usage_volumes\",\n", " \"iter_parameter\": [\"volume_id\",\"region\"]\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " 
\"condition_cfg\": \"len(all_low_usage_volumes)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_volume_by_id, lego_printer=aws_delete_volume_by_id_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "44a6cf05-385b-445d-a503-ad4aa607a568", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "volume_id, region
In this Runbook, we were able to filter low usage volumes before a given threshold number of days and delete them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete EBS Volume With Low Usage", "parameters": [ "region", "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1169)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "region": { "description": "AWS Region to get the EBS volumes from. Eg: \"us-west-2\". If nothing is given all regions will be considered.", "title": "region", "type": "string" }, "threshold": { "default": 10, "description": "The threshold number of days to check the low usage of volumes", "title": "threshold", "type": "number" }, "volume_ids": { "description": "List of EBS Volume IDs.", "title": "volume_ids", "type": "array" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Delete_EBS_Volumes_With_Low_Usage.json ================================================ { "name": "Delete EBS Volume With Low Usage", "description": "This runbook can help us identify low usage Amazon Elastic Block Store (EBS) volumes and delete these volumes in order to lower the cost of your AWS bill. This is calculates using the VolumeUsage metric. It measures the percentage of the total storage space that is currently being used by an EBS volume. 
This metric is reported as a percentage value between 0 and 100.", "uuid": "c9e1563d58cd6e3778a6c3fb11643498e3cdf3965a18c09214423998d62847b8", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Delete_ECS_Clusters_with_Low_CPU_Utilization.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "5424264e-6195-4cf9-906b-24b02d5a83f3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Find ECS Clusters with Low CPU Utilization
2) Delete ECS Clusters
Using unSkript's Find ECS Clusters with Low CPU Utilization action, we will find clusters with a low CPU utilization given a threshold percentage using the CPUUtilization attribute found in the statistics list of describe_clusters API.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region, threshold
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "479be1c9-ac15-43f7-9c17-0736c9c41a31", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2", "CATEGORY_TYPE_AWS_EBC" ], "actionDescription": "This action searches for clusters that have low CPU utilization.", "actionEntryFunction": "aws_list_clusters_with_low_utilization", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "6ad946fb1afd19286a8e7771e0f8e5566e4fdd54e3e2473385b5ac8e206e0a49" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS List ECS Clusters with Low CPU Utilization", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "25235bca4ec5a70c9c8a83bcdeff08c66bd9cb1a3a61a0e3136958631329d8ce", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "This action searches for clusters that have low CPU utilization.", "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" }, "threshold": { "constant": false, "value": "int(threshold)" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "Region", "type": "string" }, "threshold": { "default": 10, "description": "Threshold to check for cpu utilization is less than threshold.", "title": "Threshold (In percent)", "type": "integer" } }, "required": [], "title": "aws_list_clusters_with_low_utilization", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS List ECS Clusters with Low CPU Utilization", "orderProperties": [ "region", "threshold" ], "outputParams": { 
"output_name": "low_cpu_clusters", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not cluster_names", "tags": [ "aws_list_clusters_with_low_utilization" ], "uuid": "25235bca4ec5a70c9c8a83bcdeff08c66bd9cb1a3a61a0e3136958631329d8ce", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "import pprint\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_list_clusters_with_low_utilization_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_list_clusters_with_low_utilization(handle, region: str = \"\", threshold: int = 10) -> Tuple:\n", " \"\"\"aws_list_clusters_with_low_utilization Returns an array of ecs clusters.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type threshold: int\n", " :param threshold: (In percent) Threshold to check for cpu utilization\n", " is less than threshold.\n", "\n", " :rtype: List of clusters for low CPU utilization\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " ecs_Client = handle.client('ecs', region_name=reg)\n", " response = aws_get_paginator(ecs_Client, \"list_clusters\", \"clusterArns\")\n", " for cluster in response:\n", " cluster_dict = {}\n", " cluster_name = cluster.split('/')[1]\n", " stats = ecs_Client.describe_clusters(clusters=[cluster])['clusters'][0]['statistics']\n", " for stat in stats:\n", " if stat['name'] == 'CPUUtilization':\n", " cpu_utilization = 
int(stat['value'])\n", " if cpu_utilization < threshold:\n", " cluster_dict[\"cluster_name\"] = cluster_name\n", " cluster_dict[\"region\"] = reg\n", " result.append(cluster_dict)\n", " except Exception:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"threshold\": \"int(threshold)\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not cluster_names\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"low_cpu_clusters\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_list_clusters_with_low_utilization, lego_printer=aws_list_clusters_with_low_utilization_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "199591ef-cb3a-49b7-b515-3c6998050320", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "low_cpu_clusters
This action filters regions that have no clusters with low CPU utilization and creates a list of those that have them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "6a10e980-9f17-4436-9166-90ea130aa316", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-27T10:59:05.263Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Low CPU Utilization Clusters", "orderProperties": [], "tags": [], "title": "Create List of Low CPU Utilization Clusters" }, "outputs": [], "source": [ "all_low_cpu_clusters = []\n", "try:\n", " if low_cpu_clusters[0] == False:\n", " if len(low_cpu_clusters[1])!=0:\n", " all_low_cpu_clusters=low_cpu_clusters[1]\n", "except Exception:\n", " for vol_id in volume_ids:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"volume_id\"] = vol_id\n", " all_low_cpu_clusters.append(data_dict)\n", "print(all_low_cpu_clusters)" ] }, { "cell_type": "markdown", "id": "978d3b61-2fd9-461d-89bd-534d2dcf3b63", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_low_cpu_clusters
This action deletes clusters found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "b96c00e4-7519-49e3-bcd4-d1b7f921759c", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS" ], "actionDescription": "Delete AWS ECS Cluster", "actionEntryFunction": "aws_delete_ecs_cluster", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Delete ECS Cluster", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "1bdeb0fd1addf317585a71f771a1706ab9ae888f33dbddaeb126be1e454ff3a6", "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Delete AWS ECS Cluster", "id": 1, "index": 1, "inputData": [ { "cluster_name": { "constant": false, "value": "\"iter.get(\\\\\"cluster_name\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "cluster_name": { "description": "ECS Cluster name that needs to be deleted", "title": "ECS Cluster Name", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [ "region", "cluster_name" ], "title": "aws_delete_ecs_cluster", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "cluster_name": "cluster_name", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_low_cpu_clusters" } } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Delete ECS Cluster", "orderProperties": [ "region", "cluster_name" ], "printOutput": true, "startcondition": "len(all_low_cpu_clusters)!=0", "tags": [ "aws_delete_ecs_cluster" ], 
"uuid": "1bdeb0fd1addf317585a71f771a1706ab9ae888f33dbddaeb126be1e454ff3a6", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_ecs_cluster_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_delete_ecs_cluster(handle, region: str, cluster_name: str) -> Dict:\n", " \"\"\"aws_delete_ecs_cluster dict of loadbalancers info.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type cluster_name: string\n", " :param cluster_name: ECS Cluster name\n", "\n", " :rtype: dict of load balancers info.\n", " \"\"\"\n", " try:\n", " ec2Client = handle.client('ecs', region_name=region)\n", " response = ec2Client.delete_cluster(cluster=cluster_name)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=False)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"cluster_name\": \"iter.get(\\\\\"cluster_name\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_low_cpu_clusters\",\n", " \"iter_parameter\": [\"region\",\"cluster_name\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_low_cpu_clusters)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_ecs_cluster, lego_printer=aws_delete_ecs_cluster_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": 
"44a6cf05-385b-445d-a503-ad4aa607a568", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "volume_id, region
In this Runbook, we were able to filter low CPU utilization ECS clusters given threshold percentage and delete them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete ECS Clusters with Low CPU Utilization", "parameters": [ "region", "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1169)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "cluster_names": { "description": "List of ECS cluster names", "title": "cluster_names", "type": "array" }, "region": { "description": "AWS Region to get the EBS volumes from. \"us-west-2\". If nothing is given all regions will be considered.", "title": "region", "type": "string" }, "threshold": { "default": 10, "description": "Threshold (in percent) to check for the CPU utilization of clusters below the given threshold.", "title": "threshold", "type": "number" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Delete_ECS_Clusters_with_Low_CPU_Utilization.json ================================================ { "name": "Delete ECS Clusters with Low CPU Utilization", "description": "ECS clusters are a managed service that allows users to run Docker containers on AWS, making it easier to manage and scale containerized applications. However, running ECS clusters with low CPU utilization can result in wasted resources and unnecessary costs. AWS charges for the resources allocated to a cluster, regardless of whether they are fully utilized or not. 
By deleting clusters that are not being fully utilized, you can reduce the number of resources being allocated and lower the overall cost of running ECS. Furthermore, deleting unused or low-utilization clusters can also improve overall system performance by freeing up resources for other applications that require more processing power. This runbook helps us to identify such clusters and delete them.", "uuid": "6ad946fb1afd19286a8e7771e0f8e5566e4fdd54e3e2473385b5ac8e206e0a49", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Delete_ELBs_With_No_Targets_Or_Instances.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "82eebdfd-c880-40df-bd6d-5b546c92164b", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Find AWS ELBs with no targets or instances
2) Delete Load Balancers
Using unSkript's Find AWS ELBs with no targets or instances action, we will find ELBs that don't have any target groups in the case of Application Load Balancers or Network Load Balancers, and Classic Load Balancers that have no instances associated with them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "9f8e20f7-82ce-46ce-8dd8-2be94cab9174", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ELB" ], "actionDescription": "Find AWS ELBs with no targets or instances attached to them.", "actionEntryFunction": "aws_find_elbs_with_no_targets_or_instances", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Find AWS ELBs with no targets or instances", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "ed9c71d09866b0a019abe4f10951f32f9484504e0e274eb3d248e8bc321cb257", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Find AWS ELBs with no targets or instances attached to them.", "execution_data": { "last_date_success_run_cell": "2023-07-14T16:27:55.801Z" }, "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "region", "type": "string" } }, "required": [], "title": "aws_find_elbs_with_no_targets_or_instances", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Find AWS ELBs with no targets or instances", "orderProperties": [ "region" ], "outputParams": { "output_name": "elbs_with_no_targets", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not elb_arns and not elb_names", "tags": [ "aws_find_elbs_with_no_targets_or_instances" ], 
"uuid": "ed9c71d09866b0a019abe4f10951f32f9484504e0e274eb3d248e8bc321cb257", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Tuple\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from unskript.connectors.aws import aws_get_paginator\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_find_elbs_with_no_targets_or_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def aws_find_elbs_with_no_targets_or_instances(handle, region: str = \"\")->Tuple:\n", " \"\"\"aws_find_elbs_with_no_targets_or_instances Returns details of Elb's with no target groups or instances\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: str\n", " :param region: AWS Region\n", "\n", " :rtype: Tuple of status, and details of ELB's with no targets or instances\n", " \"\"\"\n", " result = []\n", " all_load_balancers = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " elbv2Client = handle.client('elbv2', region_name=reg)\n", " elbv2_response = aws_get_paginator(elbv2Client, \"describe_load_balancers\", \"LoadBalancers\")\n", " elbClient = handle.client('elb', region_name=reg)\n", " elb_response = elbClient.describe_load_balancers()\n", " for lb in elbv2_response:\n", " elb_dict = {}\n", " elb_dict[\"load_balancer_name\"] = lb['LoadBalancerName']\n", " elb_dict[\"load_balancer_arn\"] = lb['LoadBalancerArn']\n", " elb_dict[\"load_balancer_type\"] = lb['Type']\n", " elb_dict[\"load_balancer_dns\"] = lb['DNSName']\n", " elb_dict[\"region\"] = reg\n", " all_load_balancers.append(elb_dict)\n", " for lb in 
elb_response['LoadBalancerDescriptions']:\n", " elb_dict = {}\n", " elb_dict[\"load_balancer_name\"] = lb['LoadBalancerName']\n", " elb_dict[\"load_balancer_type\"] = 'classic'\n", " elb_dict[\"load_balancer_dns\"] = lb['DNSName']\n", " elb_dict[\"region\"] = reg\n", " all_load_balancers.append(elb_dict)\n", " except Exception as e:\n", " pass\n", " for load_balancer in all_load_balancers:\n", " if load_balancer['load_balancer_type']=='network' or load_balancer['load_balancer_type']=='application':\n", " elbv2Client = handle.client('elbv2', region_name=load_balancer['region'])\n", " target_groups = elbv2Client.describe_target_groups(\n", " LoadBalancerArn=load_balancer['load_balancer_arn']\n", " )\n", " if len(target_groups['TargetGroups']) == 0:\n", " elb_dict = {}\n", " elb_dict[\"elb_arn\"] = load_balancer['load_balancer_arn']\n", " elb_dict[\"elb_name\"] = load_balancer['load_balancer_name']\n", " elb_dict[\"region\"] = load_balancer['region']\n", " elb_dict[\"type\"] = load_balancer['load_balancer_type']\n", " result.append(elb_dict)\n", " else:\n", " elbClient = handle.client('elb', region_name=load_balancer['region'])\n", " res = elbClient.describe_instance_health(\n", " LoadBalancerName=load_balancer['load_balancer_name'],\n", " )\n", " if len(res['InstanceStates'])==0:\n", " elb_dict = {}\n", " elb_dict[\"elb_name\"] = load_balancer['load_balancer_name']\n", " elb_dict[\"region\"] = load_balancer['region']\n", " elb_dict[\"type\"] = load_balancer['load_balancer_type']\n", " result.append(elb_dict)\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not elb_arns and not elb_names\",\n", " \"condition_result\": true\n", " }''')\n", "\n", 
"task.configure(outputName=\"elbs_with_no_targets\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_find_elbs_with_no_targets_or_instances, lego_printer=aws_find_elbs_with_no_targets_or_instances_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "a311041f-620a-4b6b-914f-e52c6c3a71f4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "elbs_with_no_targets
This action filters regions that have no ELBs without targets and instances and creates a list of those that have them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "b85ce542-bdf0-44d2-9e75-213002d5c036", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-17T06:12:51.827Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of ELBs with no targets or instances", "orderProperties": [], "tags": [], "title": "Create List of ELBs with no targets or instances" }, "outputs": [], "source": [ "elb_classic_list = []\n", "elbv2_list = []\n", "try:\n", " for res in elbs_with_no_targets:\n", " if type(res)==bool:\n", " if res == False:\n", " continue\n", " elif type(res)==list:\n", " if len(res)!=0:\n", " for elb in res:\n", " if 'elb_arn' in elb.keys():\n", " elbv2_list.append(elb)\n", " else:\n", " elb_classic_list.append(elb)\n", "except Exception:\n", " if elb_arns:\n", " for arn in elb_arns:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"elb_arn\"] = arn\n", " elbv2_list.append(data_dict)\n", " if elb_names:\n", " for name in elb_names:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"elb_name\"] = name\n", " elb_classic_list.append(data_dict)\n", "print(\"Network/Application Load Balancers\",\"\\n\",elbv2_list, \"\\n\", \"Classic Load Balancers\", \"\\n\", elb_classic_list)" ] }, { "cell_type": "markdown", "id": "9fb3704a-9b19-49c4-96ab-a982217bbcd3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2 - Part 1", "orderProperties": [], "tags": [], "title": "Step 2 - Part 1" }, "source": [ "This action captures the following output:
\n", "elb_classic_list, elbv2_list
This action deletes Network and Application ELBs found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "a3e314ad-8dce-4a3b-bf68-29b33a1f7387", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS" ], "actionDescription": "AWS Delete Load Balancer", "actionEntryFunction": "aws_delete_load_balancer", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Delete Load Balancer", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "bb9ec9e116f23c18a3974ae84f985b60a62db4bf6a03bfe367b7881227ceac8b", "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "AWS Delete Load Balancer", "id": 1, "index": 1, "inputData": [ { "elb_arn": { "constant": false, "value": "\"iter.get(\\\\\"elb_arn\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "elb_arn": { "description": "Load Balancer ARN of the ALB/NLB type Load Balancer.", "title": "Load Balancer ARN (ALB/NLB type)", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [ "elb_arn", "region" ], "title": "aws_delete_load_balancer", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "elb_arn": "elb_arn", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "elbv2_list" } } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Delete Load Balancer", "orderProperties": [ "elb_arn", "region" ], "printOutput": true, "startcondition": "len(elbv2_list)!=0", "tags": [ "aws_delete_load_balancer" ], "uuid": 
"bb9ec9e116f23c18a3974ae84f985b60a62db4bf6a03bfe367b7881227ceac8b", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import pprint\n", "from typing import Dict\n", "from pydantic import BaseModel, Field\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_load_balancer_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_delete_load_balancer(handle, region: str, elb_arn: str) -> Dict:\n", " \"\"\"aws_delete_load_balancer dict of loadbalancers info.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type elb_arn: string\n", " :param elb_arn: load balancer ARNs.\n", "\n", " :rtype: dict of load balancers info.\n", " \"\"\"\n", " try:\n", " elbv2Client = handle.client('elbv2', region_name=region)\n", " response = elbv2Client.delete_load_balancer(LoadBalancerArn=elb_arn)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"elb_arn\": \"iter.get(\\\\\"elb_arn\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"elbv2_list\",\n", " \"iter_parameter\": [\"region\",\"elb_arn\"]\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(elbv2_list)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_load_balancer, lego_printer=aws_delete_load_balancer_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "21e8bef7-c3a3-47a4-9b63-ea57b3cd9043", 
"metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2 - Part 2", "orderProperties": [], "tags": [], "title": "Step 2 - Part 2" }, "source": [ "This action takes the following parameters:
\n", "region, elb_arn
This action deletes Classic ELBs found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "b700ed80-11dd-4aa8-b6e0-075cccf26b7b", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS" ], "actionDescription": "Delete Classic Elastic Load Balancers", "actionEntryFunction": "aws_delete_classic_load_balancer", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Delete Classic Load Balancer", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "9b88908a212472ac94ac7ce98a854c1a16f853e87f9c5a8cd5db236b637ad5d3", "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Delete Classic Elastic Load Balancers", "id": 2, "index": 2, "inputData": [ { "elb_name": { "constant": false, "value": "\"iter.get(\\\\\"elb_name\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "elb_name": { "description": "Name of classic ELB", "title": "Classic Load Balancer Name", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [ "region", "elb_name" ], "title": "aws_delete_classic_load_balancer", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "elb_name": "elb_name", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "elb_classic_list" } } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Delete Classic Load Balancer", "orderProperties": [ "region", "elb_name" ], "printOutput": true, "startcondition": "len(elb_classic_list)!=0", "tags": [ 
"aws_delete_classic_load_balancer" ], "uuid": "9b88908a212472ac94ac7ce98a854c1a16f853e87f9c5a8cd5db236b637ad5d3", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_classic_load_balancer_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_delete_classic_load_balancer(handle, region: str, elb_name: str) -> Dict:\n", " \"\"\"aws_delete_classic_load_balancer reponse of deleting a classic load balancer.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type elb_name: string\n", " :param elb_name: Classic load balancer name.\n", "\n", " :rtype: dict of deleted load balancers reponse.\n", " \"\"\"\n", " try:\n", " elblient = handle.client('elb', region_name=region)\n", " response = elblient.delete_load_balancer(LoadBalancerName=elb_name)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"elb_name\": \"iter.get(\\\\\"elb_name\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"elb_classic_list\",\n", " \"iter_parameter\": [\"region\",\"elb_name\"]\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(elb_classic_list)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_classic_load_balancer, 
lego_printer=aws_delete_classic_load_balancer_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "9c7430c8-3660-45bd-90ef-9ceab77e3daa", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "region, elb_name
In this Runbook, we were able to check for any AWS Elastic Load Balancers with no target groups or instances in our AWS account and release (remove) them in order to lower AWS costs. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete AWS ELBs With No Targets Or Instances", "parameters": null }, "kernelspec": { "display_name": "unSkript (Build: 1234)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "elb_arns": { "description": "List of ELB ARNs for type Network and Application Load Balancer", "title": "elb_arns", "type": "array" }, "elb_names": { "description": "List of ELB Names for Classic load balancers", "title": "elb_names", "type": "array" }, "region": { "description": "AWS Region to search for unattached Elastic IPs. Eg: \"us-west-2\"", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Delete_ELBs_With_No_Targets_Or_Instances.json ================================================ { "name": "Delete AWS ELBs With No Targets Or Instances", "description": "ELBs are used to distribute incoming traffic across multiple targets or instances, but if those targets or instances are no longer in use, then the ELBs may be unnecessary and can be deleted to save costs. Deleting ELBs with no targets or instances is a simple but effective way to optimize costs in your AWS environment. By identifying and removing these unused ELBs, you can reduce the number of resources you are paying for and avoid unnecessary charges. 
This runbook helps you identify all types of ELB's- Network, Application, Classic that don't have any target groups or instances attached to them.", "uuid": "2aba76792cb2802cae55deb60d28820522aeba93865572a1e9c7ddc5309e1312", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Delete_IAM_User.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "8a97b231-94d6-4e10-a24c-6eac9a4572e4", "metadata": { "jupyter": { "source_hidden": false }, "name": "delete IAM User", "orderProperties": [], "tags": [], "title": "delete IAM User" }, "source": [ "\n", "
1. Find Old EBS Snapshots
2. Delete old EBS snapshots
Using unSkript's AWS Filter Old EBS Snapshots action, we will find old snapshots given a threshold number of days.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region, threshold_days
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "a3cd0833-ab78-452c-bf5f-790fefa28d20", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EBS" ], "actionDescription": "This action list a all snapshots details that are older than the threshold", "actionEntryFunction": "aws_filter_old_ebs_snapshots", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "303d6481e8cfa508d9ba11f847906c7d46f30a1c70f9b6b0e04b12409e74f704" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Filter Old EBS Snapshots", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "9a74af3d2bb5a9aac60e5d30fb89b3ebf6867ce4782fc629cd9842bd5156a327", "collapsed": true, "condition_enabled": true, "continueOnError": false, "description": "This action list a all snapshots details that are older than the threshold", "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" }, "threshold": { "constant": false, "value": "int(threshold_days)" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "threshold": { "default": 30, "description": "(in day's) The threshold to check the snapshots older than the threshold.", "title": "Threshold (in days)", "type": "integer" } }, "required": [], "title": "aws_filter_old_ebs_snapshots", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Filter Old EBS Snapshots", "orderProperties": [ "region", "threshold" ], "outputParams": { "output_name": "unused_snapshots", "output_name_enabled": true, "output_runbook_enabled": 
false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not snapshot_ids", "tags": [ "aws_filter_old_ebs_snapshots" ], "uuid": "9a74af3d2bb5a9aac60e5d30fb89b3ebf6867ce4782fc629cd9842bd5156a327", "version": "1.0.0", "credentialsJson": {} }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List, Optional, Tuple\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pprint\n", "from datetime import datetime, timedelta\n", "import pytz\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_old_ebs_snapshots_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_filter_old_ebs_snapshots(handle, region: str=\"\", threshold: int = 30) -> Tuple:\n", " \"\"\"aws_filter_old_ebs_snapshots Returns an array of EBS snapshots details.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type threshold: int\n", " :param threshold: (in days) The threshold to check the snapshots older than the threshold.\n", "\n", " :rtype: List of EBS snapshots details.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " # Filtering the volume by region\n", " current_time = datetime.now(pytz.UTC)\n", " ec2Client = handle.resource('ec2', region_name=reg)\n", " response = ec2Client.snapshots.filter(OwnerIds=['self'])\n", " for snapshot in response:\n", " snap_data = {}\n", " running_time = current_time - snapshot.start_time\n", " if running_time > timedelta(days=int(threshold)):\n", " snap_data[\"region\"] = reg\n", " snap_data[\"snapshot_id\"] = snapshot.id\n", " result.append(snap_data)\n", " except Exception as e:\n", " pass\n", " if len(result)!=0:\n", " 
return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"threshold\": \"int(threshold_days)\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not snapshot_ids\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"unused_snapshots\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_old_ebs_snapshots, lego_printer=aws_filter_old_ebs_snapshots_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "6b8b31be", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "This action captures the following output:
\n", "unused_snapshots
This action filters regions that have no old EBS snapshots and creates a list of those that have them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "aa209041-9097-4b16-be3c-3a30aff1eb1e", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Old EBS Snapshots", "orderProperties": [], "tags": [], "title": "Create List of Old EBS Snapshots" }, "outputs": [], "source": [ "all_unused_snapshots = []\n", "dummy = []\n", "try:\n", " if unused_snapshots[0] == False:\n", " for snapshot in unused_snapshots[1]:\n", " all_unused_snapshots.append(snapshot)\n", "except Exception as e:\n", " if snapshot_ids:\n", " for snap in snapshot_ids:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"snapshot_id\"] = snap\n", " all_unused_snapshots.append(data_dict)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "3c3a62dd", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_unused_snapshots
This action deletes old EBS Snapshots found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "a30bb183-cef8-43b5-a75d-ce3ab3db0dac", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionOutputType": null, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "1bcf78d4587707b18b241fa00fd709e4ce3c3bc28ab24c9874e9b0966b08e43a", "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Delete EBS Snapshot for an EC2 instance", "id": 2, "index": 2, "inputData": [ { "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" }, "snapshot_id": { "constant": false, "value": "\"iter.get(\\\\\"snapshot_id\\\\\")\"" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "snapshot_id": { "description": "EBS snapshot ID. Eg: \"snap-34bt4bfjed9d\"", "title": "Snapshot ID", "type": "string" } }, "required": [ "region", "snapshot_id" ], "title": "aws_delete_ebs_snapshot", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "region": "region", "snapshot_id": "snapshot_id" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_unused_snapshots" } } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "metadata": { "action_bash_command": false, "action_categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_EBS" ], "action_description": "Delete EBS Snapshot for an EC2 instance", "action_entry_function": "aws_delete_ebs_snapshot", "action_is_check": false, "action_is_remediation": false, "action_needs_credential": true, "action_next_hop": null, "action_next_hop_parameter_mapping": null, "action_nouns": null, "action_output_type": "ACTION_OUTPUT_TYPE_LIST", "action_supports_iteration": true, "action_supports_poll": true, 
"action_title": "AWS Delete EBS Snapshot", "action_type": "LEGO_TYPE_AWS", "action_verbs": null, "action_version": "1.0.0" }, "name": "AWS Delete EBS Snapshot", "orderProperties": [ "region", "snapshot_id" ], "printOutput": true, "startcondition": "len(all_unused_snapshots)!=0", "tags": [], "uuid": "1bcf78d4587707b18b241fa00fd709e4ce3c3bc28ab24c9874e9b0966b08e43a", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List, Dict\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_ebs_snapshot_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_delete_ebs_snapshot(handle, region: str, snapshot_id: str) -> Dict:\n", " \"\"\"aws_delete_ebs_snapshot Returns a dict of deleted snapshot details\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type snapshot_id: string\n", " :param snapshot_id: EBS snapshot ID. 
Eg: 'snap-34bt4bfjed9d'\n", "\n", " :rtype: Deleted snapshot details\n", " \"\"\"\n", " result = []\n", " try:\n", " ec2Client = handle.client('ec2', region_name=region)\n", " result = ec2Client.delete_snapshot(SnapshotId=snapshot_id)\n", " except Exception as e:\n", " raise e\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"snapshot_id\": \"iter.get(\\\\\"snapshot_id\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_unused_snapshots\",\n", " \"iter_parameter\": [\"region\",\"snapshot_id\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_unused_snapshots)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_ebs_snapshot, lego_printer=aws_delete_ebs_snapshot_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "b6288138", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "region, snapshot_id
In this Runbook, we were able to filter old EBS Snapshots given a threshold number of days and delete them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete Old EBS Snapshots", "parameters": null }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "region": { "description": "AWS Regions to get the EBS Snapshots from. Eg: us-west-2. If nothing is given all regions will be considered.", "title": "region", "type": "string" }, "snapshot_ids": { "description": "List of EBS Snapshot IDs. Eg: [\"snap-0kwre234dew3w\",...]", "title": "snapshot_ids", "type": "array" }, "threshold_days": { "default": 30, "description": "The threshold number of days to check the unused streams", "title": "threshold_days", "type": "number" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Delete_Old_EBS_Snapshots.json ================================================ { "name": "Delete Old EBS Snapshots", "description": "Amazon Elastic Block Store (EBS) snapshots are created incrementally, an initial snapshot will include all the data on the disk, and subsequent snapshots will only store the blocks on the volume that have changed since the prior snapshot. Unchanged data is not stored, but referenced using the previous snapshot. 
This runbook helps us to find old EBS snapshots and thereby lower storage costs.", "uuid": "303d6481e8cfa508d9ba11f847906c7d46f30a1c70f9b6b0e04b12409e74f704", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Delete_RDS_Instances_with_Low_CPU_Utilization.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "5424264e-6195-4cf9-906b-24b02d5a83f3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Find RDS Instances with Low CPU Utilization
2) Delete RDS Instances
Using unSkript's Find RDS Instances with Low CPU Utilization action, we will find instances with a low CPU utilization given a threshold percentage using the CPUUtilization attribute found in the statistics list of the get_metric_statistics API of CloudWatch.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region, threshold, duration_minutes
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "ea17fd11-3ae9-4bdf-9ff8-27f656a5de48", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS_RDS", "CATEGORY_TYPE_AWS" ], "actionDescription": "This lego finds RDS instances are not utilizing their CPU resources to their full potential.", "actionEntryFunction": "aws_find_rds_instances_with_low_cpu_utilization", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "655835b762ba634f02074a48e4bae12f7a3e29bb8e6776eb8d657ddbfe181a59" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Find RDS Instances with low CPU Utilization", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "8d01f8abc8274090c2325ef32905b2649a6af779ce86f78b9e9712ad1d482165", "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "This lego finds RDS instances are not utilizing their CPU resources to their full potential.", "id": 1, "index": 1, "inputData": [ { "duration_minutes": { "constant": false, "value": "int(duration_minutes)" }, "region": { "constant": false, "value": "region" }, "utilization_threshold": { "constant": false, "value": "int(threshold)" } } ], "inputschema": [ { "properties": { "duration_minutes": { "default": 5, "description": "Value in minutes to get the start time of the metrics for CPU Utilization", "title": "Duration of Start time", "type": "integer" }, "region": { "default": "", "description": "AWS Region to get the RDS Instance", "title": "AWS Region", "type": "string" }, "utilization_threshold": { "default": 10, "description": "The threshold percentage of CPU utilization for an RDS Instance.", "title": "CPU Utilization Threshold", "type": 
"integer" } }, "required": [], "title": "aws_find_rds_instances_with_low_cpu_utilization", "type": "object" } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Find RDS Instances with low CPU Utilization", "orderProperties": [ "region", "duration_minutes", "utilization_threshold" ], "outputParams": { "output_name": "low_cpu_instances", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not db_identifiers", "tags": [ "aws_find_rds_instances_with_low_cpu_utilization" ], "uuid": "8d01f8abc8274090c2325ef32905b2649a6af779ce86f78b9e9712ad1d482165", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Tuple\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from unskript.connectors.aws import aws_get_paginator\n", "import pprint\n", "from datetime import datetime,timedelta\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_find_rds_instances_with_low_cpu_utilization_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_find_rds_instances_with_low_cpu_utilization(handle, utilization_threshold:int=10, region: str = \"\", duration_minutes:int=5) -> Tuple:\n", " \"\"\"aws_find_rds_instances_with_low_cpu_utilization finds RDS instances that have a lower cpu utlization than the given threshold\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: Region of the RDS.\n", "\n", " :type utilization_threshold: integer\n", " :param utilization_threshold: The threshold percentage of CPU utilization for an RDS Instance.\n", "\n", " :type duration_minutes: 
integer\n", " :param duration_minutes: Value in minutes to get the start time of the metrics for CPU Utilization\n", "\n", " :rtype: status, list of instances and their region.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " rdsClient = handle.client('rds', region_name=reg)\n", " cloudwatchClient = handle.client('cloudwatch', region_name=reg)\n", " all_instances = aws_get_paginator(rdsClient, \"describe_db_instances\", \"DBInstances\")\n", " for db in all_instances:\n", " response = cloudwatchClient.get_metric_data(\n", " MetricDataQueries=[\n", " {\n", " 'Id': 'cpu',\n", " 'MetricStat': {\n", " 'Metric': {\n", " 'Namespace': 'AWS/RDS',\n", " 'MetricName': 'CPUUtilization',\n", " 'Dimensions': [\n", " {\n", " 'Name': 'DBInstanceIdentifier',\n", " 'Value': db['DBInstanceIdentifier']\n", " },\n", " ]\n", " },\n", " 'Period': 60,\n", " 'Stat': 'Average',\n", " },\n", " 'ReturnData': True,\n", " },\n", " ],\n", " StartTime=(datetime.now() - timedelta(minutes=duration_minutes)).isoformat(),\n", " EndTime=datetime.utcnow().isoformat(),\n", " )\n", " if 'Values' in response['MetricDataResults'][0]:\n", " cpu_utilization = response['MetricDataResults'][0]['Values'][0]\n", " if cpu_utilization < utilization_threshold:\n", " db_instance_dict = {}\n", " db_instance_dict[\"region\"] = reg\n", " db_instance_dict[\"instance\"] = db['DBInstanceIdentifier']\n", " result.append(db_instance_dict)\n", " except Exception as error:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"duration_minutes\": \"int(duration_minutes)\",\n", " \"utilization_threshold\": \"int(threshold)\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " 
\"condition_cfg\": \"not db_identifiers\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"low_cpu_instances\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_find_rds_instances_with_low_cpu_utilization, lego_printer=aws_find_rds_instances_with_low_cpu_utilization_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "199591ef-cb3a-49b7-b515-3c6998050320", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "low_cpu_instances
This action filters regions that have no instances with low CPU utilization and creates a list of those that have them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "6a10e980-9f17-4436-9166-90ea130aa316", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-02T17:15:25.139Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Low CPU Utilization RDS Instances", "orderProperties": [], "tags": [], "title": "Create List of Low CPU Utilization RDS Instances" }, "outputs": [], "source": [ "all_low_cpu_instances = []\n", "dummy = []\n", "try:\n", " if low_cpu_instances[0] == False:\n", " if len(low_cpu_instances[1]) != 0:\n", " all_low_cpu_instances = low_cpu_instances[1]\n", "except Exception:\n", " for ins_identifier in db_identifiers:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"instance\"] = ins_identifier\n", " all_low_cpu_instances.append(data_dict)\n", "print(all_low_cpu_instances)" ] }, { "cell_type": "markdown", "id": "978d3b61-2fd9-461d-89bd-534d2dcf3b63", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_low_cpu_instances
This action deletes instances found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "31f99d97-b51b-45c1-b2ba-b0bdb10505ff", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_RDS" ], "actionDescription": "Delete AWS RDS Instance", "actionEntryFunction": "aws_delete_rds_instance", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": false, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Delete RDS Instance", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "11b88b8c6290219912511a30bfb913bc67f7759a6a1298612ed0ac37e381c8f2", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Delete AWS RDS Instance", "id": 4, "index": 4, "inputData": [ { "instance_id": { "constant": false, "value": "\"iter.get(\\\\\"instance\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "instance_id": { "description": "The DB instance identifier for the DB instance to be deleted. 
This parameter isn\u2019t case-sensitive.", "title": "RDS DB Identifier", "type": "string" }, "region": { "description": "AWS region of instance identifier", "title": "AWS Region", "type": "string" } }, "required": [ "instance_id", "region" ], "title": "aws_delete_rds_instance", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "instance_id": "instance", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_low_cpu_instances" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Delete RDS Instance", "orderProperties": [ "instance_id", "region" ], "printOutput": true, "startcondition": "len(all_low_cpu_instances)!=0", "tags": [ "aws_delete_rds_instance" ], "uuid": "11b88b8c6290219912511a30bfb913bc67f7759a6a1298612ed0ac37e381c8f2", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2023 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Dict\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_rds_instance_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_delete_rds_instance(handle, region: str, instance_id: str) -> Dict:\n", " \"\"\"aws_delete_rds_instance dict of response.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type instance_id: string\n", " :param instance_id: The DB instance identifier for the DB instance to be deleted. 
This parameter isn\u2019t case-sensitive.\n", "\n", " :rtype: dict of response of deleting an RDS instance\n", " \"\"\"\n", " try:\n", " ec2Client = handle.client('rds', region_name=region)\n", " response = ec2Client.delete_db_instance(DBInstanceIdentifier=instance_id)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=False)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"instance_id\": \"iter.get(\\\\\"instance\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_low_cpu_instances\",\n", " \"iter_parameter\": [\"region\",\"instance_id\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_low_cpu_instances)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_rds_instance, lego_printer=aws_delete_rds_instance_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "44a6cf05-385b-445d-a503-ad4aa607a568", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "instance_id, region
In this Runbook, we were able to filter low CPU utilization RDS Instance given threshold percentage and delete them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete RDS Instances with Low CPU Utilization", "parameters": [ "region", "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1169)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "db_identifiers": { "description": "List of RDS Db identifiers.", "title": "db_identifiers", "type": "array" }, "duration_minutes": { "default": 5, "description": "Start time value in minutes to get the start time of metrics collection", "title": "duration_minutes", "type": "number" }, "region": { "description": "AWS Region to get the RDS Instances from. Eg: \"us-west-2\". If nothing is given all regions will be considered.", "title": "region", "type": "string" }, "threshold": { "default": 10, "description": "Threshold (in percent) to check for the CPU utilization of RDS Instances below the given threshold.", "title": "threshold", "type": "number" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Delete_RDS_Instances_with_Low_CPU_Utilization.json ================================================ { "name": "Delete RDS Instances with Low CPU Utilization", "description": "Deleting RDS instances with low CPU utilization is a cost optimization strategy that involves identifying RDS instances with consistently low CPU usage and deleting them to save costs. 
This approach helps to eliminate unnecessary costs associated with running idle database instances that are not being fully utilized. This runbook helps us to find and delete such instances.", "uuid": "655835b762ba634f02074a48e4bae12f7a3e29bb8e6776eb8d657ddbfe181a59", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Delete_Redshift_Clusters_with_Low_CPU_Utilization.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "5424264e-6195-4cf9-906b-24b02d5a83f3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Find Redshift Clusters with Low CPU Utilization
2) Delete Redshift Cluster
Using unSkript's Find Redshift Clusters with Low CPU Utilization action, we will find clusters with a low CPU utilization given a threshold percentage using the CPUUtilization attribute found in the statistics list of the get_metric_statistics API of CloudWatch.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region, threshold, duration_minutes
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "1424fc3d-ad1a-4614-ad08-bbb1d7151b9f", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_REDSHIFT", "CATEGORY_TYPE_AWS_CLOUDWATCH" ], "actionDescription": "Find underutilized Redshift clusters in terms of CPU utilization.", "actionEntryFunction": "aws_find_redshift_clusters_with_low_cpu_utilization", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Find Redshift Clusters with low CPU Utilization", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "27f2812eb37ee235c60584748f430bde0f1df9f7744b91c6148fa647d270dac8", "collapsed": true, "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Find underutilized Redshift clusters in terms of CPU utilization.", "execution_data": { "last_date_success_run_cell": "2023-05-04T11:22:35.582Z" }, "id": 1, "index": 1, "inputData": [ { "duration_minutes": { "constant": false, "value": "int(duration_minutes)" }, "region": { "constant": false, "value": "iter_item" }, "utilization_threshold": { "constant": false, "value": "int(threshold)" } } ], "inputschema": [ { "properties": { "duration_minutes": { "default": 5, "description": "Value in minutes to determine the start time of the data points. 
", "title": "Duration (in minutes)", "type": "integer" }, "region": { "default": "", "description": "AWS Region to get the Redshift Cluster", "title": "AWS Region", "type": "string" }, "utilization_threshold": { "default": 10, "description": "The threshold value in percent of CPU utilization of the Redshift cluster", "title": "CPU utilization threshold(in %)", "type": "integer" } }, "required": [], "title": "aws_find_redshift_clusters_with_low_cpu_utilization", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "region", "iter_list": { "constant": false, "objectItems": false, "value": "region" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Find Redshift Clusters with low CPU Utilization", "orderProperties": [ "region", "duration_minutes", "utilization_threshold" ], "outputParams": { "output_name": "low_cpu_clusters", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not cluster_identifiers", "tags": [ "aws_find_redshift_clusters_with_low_cpu_utilization" ], "uuid": "27f2812eb37ee235c60584748f430bde0f1df9f7744b91c6148fa647d270dac8", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Tuple\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pprint\n", "from datetime import datetime,timedelta\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_find_redshift_clusters_with_low_cpu_utilization_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_find_redshift_clusters_with_low_cpu_utilization(handle, utilization_threshold:int=10, region: str = \"\", duration_minutes:int=5) -> 
Tuple:\n", " \"\"\"aws_find_redshift_clusters_with_low_cpu_utilization finds Redshift Clusters that have a lower cpu utlization than the given threshold\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: Region of the Cluster.\n", "\n", " :type utilization_threshold: integer\n", " :param utilization_threshold: The threshold percentage of CPU utilization for a Redshift Cluster.\n", "\n", " :type duration_minutes: integer\n", " :param duration_minutes: The threshold percentage of CPU utilization for a Redshift Cluster.\n", "\n", " :rtype: status, list of clusters and their region.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " redshiftClient = handle.client('redshift', region_name=reg)\n", " cloudwatchClient = handle.client('cloudwatch', region_name=reg)\n", " for cluster in redshiftClient.describe_clusters()['Clusters']:\n", " cluster_identifier = cluster['ClusterIdentifier']\n", " response = cloudwatchClient.get_metric_statistics(\n", " Namespace='AWS/Redshift',\n", " MetricName='CPUUtilization',\n", " Dimensions=[\n", " {\n", " 'Name': 'ClusterIdentifier',\n", " 'Value': cluster_identifier\n", " }\n", " ],\n", " StartTime=(datetime.utcnow() - timedelta(minutes=duration_minutes)).isoformat(),\n", " EndTime=datetime.utcnow().isoformat(),\n", " Period=60,\n", " Statistics=['Average']\n", " )\n", " if len(response['Datapoints']) != 0:\n", " cpu_usage_percent = response['Datapoints'][-1]['Average']\n", " if cpu_usage_percent < utilization_threshold:\n", " cluster_dict = {}\n", " cluster_dict[\"region\"] = reg\n", " cluster_dict[\"cluster\"] = cluster_identifier\n", " result.append(cluster_dict)\n", " except Exception:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", 
"\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"duration_minutes\": \"int(duration_minutes)\",\n", " \"utilization_threshold\": \"int(threshold)\",\n", " \"region\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"region\",\n", " \"iter_parameter\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not cluster_identifiers\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"low_cpu_clusters\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_find_redshift_clusters_with_low_cpu_utilization, lego_printer=aws_find_redshift_clusters_with_low_cpu_utilization_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "199591ef-cb3a-49b7-b515-3c6998050320", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "low_cpu_clusters
This action filters regions that have no clusters with low CPU utilization and creates a list of those that have them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "6a10e980-9f17-4436-9166-90ea130aa316", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-04T11:22:38.609Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Low CPU Utilization RDS Instances", "orderProperties": [], "tags": [], "title": "Create List of Low CPU Utilization RDS Instances" }, "outputs": [], "source": [ "all_low_cpu_clusters = []\n", "try:\n", " for res in low_cpu_clusters:\n", " if type(res)==bool:\n", " if res == False:\n", " continue\n", " elif type(res)==list:\n", " if len(res)!=0:\n", " all_low_cpu_clusters=res\n", "except Exception:\n", " for ins_identifier in cluster_identifiers:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"cluster\"] = ins_identifier\n", " all_low_cpu_clusters.append(data_dict)\n", "print(all_low_cpu_clusters)\n", "task.configure(outputName=\"all_low_cpu_instances\")" ] }, { "cell_type": "markdown", "id": "978d3b61-2fd9-461d-89bd-534d2dcf3b63", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_low_cpu_clusters
This action deletes the clusters found in Step 1. By default, the skip final cluster snapshot option is set to False, so a final snapshot of the cluster is taken before deletion.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "267106f2-0625-4a8e-a9e6-4d4e35bcb474", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_REDSHIFT" ], "actionDescription": "Delete AWS Redshift Cluster", "actionEntryFunction": "aws_delete_redshift_cluster", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Delete Redshift Cluster", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "6d9934159356d4290f164d36cdd42609f8916a87d4d68f6271bb8634f12485b4", "collapsed": true, "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Delete AWS Redshift Cluster", "id": 7, "index": 7, "inputData": [ { "cluster_identifier": { "constant": false, "value": "\"iter.get(\\\\\"cluster\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" }, "skip_final_cluster_snapshot": { "constant": true, "value": false } } ], "inputschema": [ { "properties": { "cluster_identifier": { "description": "The identifier of the cluster to be deleted.", "title": "Cluster Identifier", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "skip_final_cluster_snapshot": { "default": false, "description": "Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true, a final cluster snapshot is not created. 
If false, a final cluster snapshot is created before the cluster is deleted.", "title": "Skip Final Cluster Snapshot", "type": "boolean" } }, "required": [ "region", "cluster_identifier" ], "title": "aws_delete_redshift_cluster", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "cluster_identifier": "cluster", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_low_cpu_clusters" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Delete Redshift Cluster", "orderProperties": [ "region", "cluster_identifier", "skip_final_cluster_snapshot" ], "printOutput": true, "startcondition": "len(all_low_cpu_clusters)!=0", "tags": [ "aws_delete_redshift_cluster" ], "uuid": "6d9934159356d4290f164d36cdd42609f8916a87d4d68f6271bb8634f12485b4", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2023 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_redshift_cluster_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_delete_redshift_cluster(handle, region: str, cluster_identifier: str, skip_final_cluster_snapshot:bool=False) -> Dict:\n", " \"\"\"aws_delete_redshift_cluster dict response.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type cluster_identifier: string\n", " :param cluster_identifier: The identifier of the cluster to be deleted.\n", "\n", " :type skip_final_cluster_snapshot: boolean\n", " :param skip_final_cluster_snapshot: Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true, a final cluster snapshot is not created. 
If false, a final cluster snapshot is created before the cluster is deleted.\n", "\n", " :rtype: dict of response\n", " \"\"\"\n", " try:\n", " redshiftClient = handle.client('redshift', region_name=region)\n", " response = redshiftClient.delete_cluster(\n", " ClusterIdentifier=cluster_identifier,\n", " SkipFinalClusterSnapshot=skip_final_cluster_snapshot\n", " )\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"skip_final_cluster_snapshot\": \"False\",\n", " \"cluster_identifier\": \"iter.get(\\\\\"cluster\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_low_cpu_clusters\",\n", " \"iter_parameter\": [\"cluster_identifier\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_low_cpu_clusters)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_redshift_cluster, lego_printer=aws_delete_redshift_cluster_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "44a6cf05-385b-445d-a503-ad4aa607a568", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "cluster, region, skip_final_cluster_snapshot
In this Runbook, we were able to filter Redshift Clusters with low CPU utilization given a threshold percentage and delete them. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete Redshift Clusters with Low CPU Utilization", "parameters": [ "region", "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "cluster_identifiers": { "description": "List of Redshift Clusters identifiers.", "title": "cluster_identifiers", "type": "array" }, "duration_minutes": { "default": 5, "description": "Start time value in minutes to get the start time of metrics collection", "title": "duration_minutes", "type": "number" }, "region": { "description": "AWS Region to get the Redshift Clusters from. Eg: \"us-west-2\". If nothing is given all regions will be considered.", "title": "region", "type": "string" }, "threshold": { "default": 10, "description": "Threshold (in percent) to check for the CPU utilization of Redshift Clusters below the given threshold.", "title": "threshold", "type": "number" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Delete_Redshift_Clusters_with_Low_CPU_Utilization.json ================================================ { "name": "Delete Redshift Clusters with Low CPU Utilization", "description": "Redshift clusters are the basic units of compute and storage in Amazon Redshift, and they can be configured to meet specific performance and cost requirements. 
In order to optimize the cost and performance of Redshift clusters, it is important to regularly monitor their CPU utilization. If a cluster is consistently showing low CPU utilization over an extended period of time, it may be a good idea to delete the cluster to save costs. This runbook helps us find such clusters and delete them.", "uuid": "2a51c98c5c99d132011e285546e365402351fd3d09214041aea7592367bd48bf", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Delete_Unattached_EBS_Volume.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "b526603d-f9fd-4074-adc3-f83dfee4ec85", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "1)Filter AWS Unattached EBS Volume
2)Create Snapshot Of EBS Volume
3)Delete EBS Volume
Here we will use unSkript Filter AWS Unattached EBS Volume action. This action filters all the EBS volumes from the given region and returns a list of all the unattached EBS volumes. It will execute if the ebs_volume parameter is not passed.
\n", "\n", "Input parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "3209f960-b7ea-4858-8dba-27fd7165ff06", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2", "CATEGORY_TYPE_AWS_EBC" ], "actionDescription": "Filter AWS Unattached EBS Volume", "actionEntryFunction": "aws_filter_ebs_unattached_volumes", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "da23633be34037f023e1c1f56220ec75eb2729d7d8eb2bca9badec15ed0fd2ca" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Filter AWS Unattached EBS Volume", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "375a1a2a5100b3a99ab867f9fcd54d46e2128dafc69dbbc03bb2083d56668cf4", "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Filter AWS Unattached EBS Volume", "execution_data": { "last_date_success_run_cell": "2023-05-17T16:19:59.901Z" }, "id": 4, "index": 4, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_filter_ebs_unattached_volumes", "type": "object" } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Filter AWS Unattached EBS Volume", "orderProperties": [ "region" ], "outputParams": { "output_name": "unattached_volumes", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not ebs_volume", "tags": [ "aws_filter_ebs_unattached_volumes" ], "uuid": "375a1a2a5100b3a99ab867f9fcd54d46e2128dafc69dbbc03bb2083d56668cf4", 
"version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Tuple\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_ebs_unattached_volumes_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_filter_ebs_unattached_volumes(handle, region: str = \"\") -> Tuple:\n", " \"\"\"aws_filter_ebs_unattached_volumes Returns an array of ebs volumes.\n", "\n", " :type region: string\n", " :param region: Used to filter the volume for specific region.\n", "\n", " :rtype: Tuple with status result and list of EBS Unattached Volume.\n", " \"\"\"\n", " result=[]\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " # Filtering the volume by region\n", " ec2Client = handle.resource('ec2', region_name=reg)\n", " volumes = ec2Client.volumes.all()\n", "\n", " # collecting the volumes which has zero attachments\n", " for volume in volumes:\n", " volume_dict = {}\n", " if len(volume.attachments) == 0:\n", " volume_dict[\"region\"] = reg\n", " volume_dict[\"volume_id\"] = volume.id\n", " result.append(volume_dict)\n", " except Exception as e:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not ebs_volume\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"unattached_volumes\")\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", 
"task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_ebs_unattached_volumes, lego_printer=aws_filter_ebs_unattached_volumes_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "44f706b0-5e9e-4851-88fb-668cd57b8139", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "unattached_volumes
In this action, we modify the output from step 1 and return a list of dictionary items for the unattached EBS volume.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 10, "id": "85f04201-71c3-48cd-ad39-cdb78addcd44", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-17T16:32:51.626Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Modify Step-1 Output", "orderProperties": [], "tags": [], "title": "Modify Step-1 Output" }, "outputs": [], "source": [ "ebs_list = []\n", "try:\n", " if unattached_volumes[0] == False:\n", " for volume in unattached_volumes[1]:\n", " ebs_list.append(volume)\n", "except Exception as e:\n", " if ebs_volume:\n", " for i in ebs_volume:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"volume_id\"] = i\n", " ebs_list.append(data_dict)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "9bcb8839-c160-4d2b-9af3-0f133d45bcd7", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable: ebs_list
\n", "
Here we will use the unSkript Create Snapshot Of EBS Volume action. In this action, we will back up the data stored in EBS volumes by passing the list of unattached EBS volumes from step 1 and creating a snapshot of the EBS volume of the EC2 instance.
\n", "\n", "\n", "Input parameters:
\n", "volume_id,region
\n", "" ] }, { "cell_type": "code", "execution_count": 30, "id": "f2c931e1-b221-416c-8493-270e34511035", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "b2fa154276e80ccc52ca79ee65d784371889f5011175fa9313f5c052dd44c5cb", "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Create a snapshot for EBS volume of the EC2 Instance for backing up the data stored in EBS", "execution_data": { "last_date_success_run_cell": "2023-01-30T20:14:12.838Z" }, "id": 177, "index": 177, "inputData": [ { "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" }, "volume_id": { "constant": false, "value": "\"iter.get(\\\\\"volume_id\\\\\")\"" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "volume_id": { "description": "Volume ID to create snapshot for particular volume e.g. 
vol-01eb21cfce30a956c", "title": "Volume ID", "type": "string" } }, "required": [ "volume_id", "region" ], "title": "aws_create_volumes_snapshot", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "region": "region", "volume_id": "volume_id" }, "iter_list": { "constant": false, "objectItems": true, "value": "ebs_list" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Create Snapshot For Volume", "nouns": [], "orderProperties": [ "volume_id", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "snapshot_metadata", "output_name_enabled": true }, "printOutput": true, "tags": [], "verbs": [] }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2022 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_create_volumes_snapshot_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_create_volumes_snapshot(handle, volume_id: str, region: str) -> List:\n", " \"\"\"aws_create_volumes_snapshot Returns an list containing SnapshotId.\n", "\n", " :type region: string\n", " :param region: used to filter the volume for a given region.\n", "\n", " :type volume_id: string\n", " :param volume_id: Volume ID to create snapshot for particular volume.\n", "\n", " :rtype: List containing SnapshotId.\n", " \"\"\"\n", " result = []\n", "\n", " ec2Client = handle.resource('ec2', region_name=region)\n", "\n", " try:\n", " response = ec2Client.create_snapshot(VolumeId=volume_id)\n", " result.append(response)\n", " except Exception as e:\n", " raise e\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"volume_id\": 
\"iter.get(\\\\\"volume_id\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"ebs_list\",\n", " \"iter_parameter\": [\"volume_id\",\"region\"]\n", " }''')\n", "\n", "task.configure(outputName=\"snapshot_metadata\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_create_volumes_snapshot, lego_printer=aws_create_volumes_snapshot_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "bafc0d66-295a-4a46-815b-6b2fbb2c5d75", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2 Extension", "orderProperties": [], "tags": [], "title": "Step-2 Extension" }, "source": [ "Output variable:
\n", "snapshot_metadata
In this action, we modify the output from step 2 and return a list of dictionary items for the volumes whose snapshot has been created.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 32, "id": "3de76e1e-ef8a-4dc9-9300-abcf4efb78ad", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-01-30T20:14:53.327Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Modify Step-2", "orderProperties": [], "tags": [], "title": "Modify Step-2" }, "outputs": [], "source": [ "import json\n", "\n", "snapshot_volumes = []\n", "for k, v in snapshot_metadata.items():\n", " try:\n", " if v[0].id:\n", " snap_dict = json.loads(k.replace(\"\\'\", \"\\\"\"))\n", " snapshot_volumes.append(snap_dict)\n", " except Exception as e:\n", " pass" ] }, { "attachments": {}, "cell_type": "markdown", "id": "1c18b419-768f-4479-bdbf-d64fef6792c3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-3", "orderProperties": [], "tags": [], "title": "Step-3" }, "source": [ "Output variable: snapshot_volumes
\n", "
Delete EBS Volume
\n", "In this action, we delete the unattached EBS volume we get after steps 1 and 2.
\n", "\n", "\n", "Input parameters:
\n", "volume_id,region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "f85c99de-3ba6-4aae-a85d-1b790e7a00a2", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "e8cccc03e1af323982c0ab9f06c01127c0481ca81943eb7e82e46245140b1059", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Delete AWS Volume by Volume ID", "id": 273, "index": 273, "inputData": [ { "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" }, "volume_id": { "constant": false, "value": "\"iter.get(\\\\\"volume_id\\\\\")\"" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "volume_id": { "description": "Volume ID.", "title": "Volume ID", "type": "string" } }, "required": [ "volume_id", "region" ], "title": "aws_delete_volumes", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "region": "region", "volume_id": "volume_id" }, "iter_list": { "constant": false, "objectItems": true, "value": "snapshot_volumes" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Delete AWS EBS Volume by Volume ID", "nouns": [], "orderProperties": [ "volume_id", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "deletion_information", "output_name_enabled": true }, "printOutput": true, "startcondition": "len(snapshot_volumes) > 0", "tags": [ "aws_delete_volumes" ], "title": "Delete AWS EBS Volume by Volume ID", "verbs": [] }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2022 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List\n", "import pprint\n", "\n", "\n", "from beartype import 
beartype\n", "@beartype\n", "def aws_delete_volumes_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint({\"Output\": output})\n", "\n", "\n", "@beartype\n", "def aws_delete_volumes(handle, volume_id: str, region: str) -> str:\n", " \"\"\"aws_filter_ebs_unattached_volumes Returns an array of ebs volumes.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method.\n", "\n", " :type region: string\n", " :param region: Used to filter the volume for specific region.\n", "\n", " :type volume_id: string\n", " :param volume_id: Volume ID needed to delete particular volume.\n", "\n", " :rtype: Result of the API in the List form.\n", " \"\"\"\n", " result = []\n", "\n", " ec2Client = handle.client('ec2',region_name=region)\n", "\n", " # Adding logic for deletion criteria\n", " try:\n", " response = ec2Client.delete_volume(VolumeId=volume_id,)\n", " result.append(response)\n", " except Exception as e:\n", " result.append(e)\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"volume_id\": \"iter.get(\\\\\"volume_id\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"snapshot_volumes\",\n", " \"iter_parameter\": [\"volume_id\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(snapshot_volumes) > 0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"deletion_information\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_volumes, lego_printer=aws_delete_volumes_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": 
"943ceb40-c278-45a7-81a0-d16a686d1db8", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "Output variable:
\n", "deletion_information
In this Runbook, we demonstrated the use of unSkript's AWS actions to filter unattached EBS volumes, create snapshots of them, and delete them. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete Unattached AWS EBS Volumes", "parameters": [ "ebs_volume", "region" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "ebs_volume": { "default": "[\"abc\"]", "description": "Volume Id of the unattached volume.", "title": "ebs_volume", "type": "array" }, "region": { "default": "abc", "description": "AWS region e.g. \"us-west-2\"", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "e8899eb02dfbc033aab5733bdae1bd213fa031d40331094008e8673d99ebab63" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Delete_Unattached_EBS_Volume.json ================================================ { "name": "Delete Unattached AWS EBS Volumes", "description": "This runbook can be used to delete all unattached EBS Volumes within an AWS region. You can delete an Amazon EBS volume that you no longer need. After deletion, its data is gone and the volume can't be attached to any instance. 
So before deletion, you can store a snapshot of the volume, which you can use to re-create the volume later.", "uuid": "da23633be34037f023e1c1f56220ec75eb2729d7d8eb2bca9badec15ed0fd2ca", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Delete_Unused_AWS_Secrets.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "5424264e-6195-4cf9-906b-24b02d5a83f3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1. Find unused secrets
2. Delete unused secrets
Using unSkript's Filter AWS Unused Secrets action, we will find unused secrets given a threshold number of days from their last use date. By default threshold number of days is set to 30.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region, threshold_days
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "9854addd-0a54-40f2-a1f5-5ccf4630dd87", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_COST_OPT" ], "actionDescription": "This action lists all the unused secrets from AWS by comparing the last used date with the given threshold.", "actionEntryFunction": "aws_list_unused_secrets", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "2a9101a1cf7be1cf70a30de2199dca5b302c3096" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS List Unused Secrets", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "97f5fa81fca213403df2f1b3c17e6f83024b7df66f313f537abaa2a00dab745b", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "This action lists all the unused secrets from AWS by comparing the last used date with the given threshold.", "id": 4, "index": 4, "inputData": [ { "max_age_days": { "constant": false, "value": "int(threshold_days)" }, "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "max_age_days": { "default": 30, "description": "The threshold to check the last use of the secret.", "title": "Max Age Day's", "type": "integer" }, "region": { "default": "", "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_list_unused_secrets", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS List Unused Secrets", "orderProperties": [ "region", "max_age_days" ], "outputParams": { "output_name": "unused_secrets", "output_name_enabled": true, 
"output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not secret_names", "tags": [ "aws_list_unused_secrets" ], "uuid": "97f5fa81fca213403df2f1b3c17e6f83024b7df66f313f537abaa2a00dab745b", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "import pprint\n", "from typing import Optional, Tuple\n", "from datetime import datetime, timedelta\n", "from pydantic import BaseModel, Field\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pytz\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_list_unused_secrets_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_list_unused_secrets(handle, region: str = \"\", max_age_days: int = 30) -> Tuple:\n", " \"\"\"aws_list_unused_secrets Returns an array of unused secrets.\n", "\n", " :type region: string\n", " :param region: AWS region.\n", "\n", " :type max_age_days: int\n", " :param max_age_days: The threshold to check the last use of the secret.\n", "\n", " :rtype: Tuple with status result and list of unused secrets.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " # Filtering the secrets by region\n", " ec2Client = handle.client('secretsmanager', region_name=reg)\n", " res = aws_get_paginator(ec2Client, \"list_secrets\", \"SecretList\")\n", " for secret in res:\n", " secret_dict = {}\n", " secret_id = secret['Name']\n", " last_accessed_date = ec2Client.describe_secret(SecretId=secret_id)\n", " if 'LastAccessedDate' in last_accessed_date:\n", " if last_accessed_date[\"LastAccessedDate\"] < datetime.now(pytz.UTC) - timedelta(days=int(max_age_days)):\n", " 
secret_dict[\"secret_name\"] = secret_id\n", " secret_dict[\"region\"] = reg\n", " result.append(secret_dict)\n", " else:\n", " if last_accessed_date[\"CreatedDate\"] < datetime.now(pytz.UTC) - timedelta(days=int(max_age_days)):\n", " secret_dict[\"secret_name\"] = secret_id\n", " secret_dict[\"region\"] = reg\n", " result.append(secret_dict)\n", " except Exception:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"max_age_days\": \"int(threshold_days)\"\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not secret_names\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"unused_secrets\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_list_unused_secrets, lego_printer=aws_list_unused_secrets_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "45c3142e-4eb4-4ae7-9522-08fff5207d1f", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "unused_secrets
This action filters regions that have no unused secrets and creates a list of those that have them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": 7, "id": "6a10e980-9f17-4436-9166-90ea130aa316", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-18T13:49:30.460Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Unused Secrets", "orderProperties": [], "tags": [], "title": "Create List of Unused Secrets" }, "outputs": [], "source": [ "all_unused_secrets = []\n", "try:\n", " if unused_secrets[0] == False:\n", " for secret in unused_secrets[1]:\n", " all_unused_secrets.append(secret)\n", "except Exception as e:\n", " if secret_names:\n", " for name in secret_names:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"secret_name\"] = name\n", " all_unused_secrets.append(data_dict)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "978d3b61-2fd9-461d-89bd-534d2dcf3b63", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_unused_secrets
This action deletes unused secrets found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "2def0b0d-772b-4bee-896e-98463a564477", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS" ], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "e83805f8b044c82cabcec54003ce692f54ab8781b70d6fde24b9915cb2b166a7", "checkEnabled": false, "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "AWS Delete Secret", "id": 242, "index": 242, "inputData": [ { "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" }, "secret_name": { "constant": false, "value": "\"iter.get(\\\\\"secret_name\\\\\")\"" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "secret_name": { "description": "Name of the secret to be deleted.", "title": "Secret Name", "type": "string" } }, "required": [ "secret_name", "region" ], "title": "aws_delete_secret", "type": "object" } ], "isUnskript": false, "iterData": [ { "iter_enabled": true, "iter_item": { "region": "region", "secret_name": "secret_name" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_unused_secrets" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Delete Secret", "nouns": [], "orderProperties": [ "secret_name", "region" ], "output": { "type": "" }, "printOutput": true, "startcondition": "if len(all_unused_secrets)!=0", "tags": [ "aws_delete_secret" ], "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2023 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from 
pydantic import BaseModel, Field\n", "from typing import Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_secret_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_delete_secret(handle, region: str, secret_name: str) -> Dict:\n", " \"\"\"aws_delete_secret Dict with secret details.\n", "\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :type secret_name: string\n", " :param secret_name: Name of the secret to be deleted.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :rtype: Dict with secret details.\n", " \"\"\"\n", " try:\n", " secrets_client = handle.client('secretsmanager', region_name=region)\n", " response = secrets_client.delete_secret(SecretId=secret_name)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"secret_name\": \"iter.get(\\\\\"secret_name\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_unused_secrets\",\n", " \"iter_parameter\": [\"region\",\"secret_name\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"if len(all_unused_secrets)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_secret, lego_printer=aws_delete_secret_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "44a6cf05-385b-445d-a503-ad4aa607a568", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", 
"orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "region, secret_name
In this Runbook, we were able to filter unused secrets and delete them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete Unused AWS Secrets", "parameters": [ "region", "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "region": { "description": "AWS Regions to get the secrets from. Eg: us-west-2. If nothing is given all regions will be considered.", "title": "region", "type": "string" }, "secret_names": { "description": "List of AWS Secret Names. Eg: [\"sbox-alex/mongodbsecret\",\"user1/importsecret\"]", "title": "secret_names", "type": "array" }, "threshold_days": { "default": "30", "description": "The threshold number of days to check the last use of the secret.", "title": "threshold_days", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Delete_Unused_AWS_Secrets.json ================================================ { "name": "Delete Unused AWS Secrets", "description": "This runbook can be used to delete unused secrets in AWS.", "uuid": "2a9101a1cf7be1cf70a30de2199dca5b302c3096", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Delete_Unused_Log_Streams.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": 
"5424264e-6195-4cf9-906b-24b02d5a83f3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Find unused log streams
2) Delete unused log streams
Using unSkript's AWS Filter Unused Log Streams action, we will find unused log streams given a threshold number of days from their last use date.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region, threshold_days
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "ce3e22b1-4f4e-4f16-a0e4-c57b95d0bb9a", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_LOGS" ], "actionDescription": "This action lists all log streams that are unused for all the log groups by the given threshold.", "actionEntryFunction": "aws_filter_unused_log_streams", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "64b6e7809ddfb1094901da74924ca3386510a1cd" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Filter Unused Log Stream", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "53df09f034bd51da247c01b663d9e7c84d0ca615cfed4bfe2545547a5a4466be", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "This action lists all log streams that are unused for all the log groups by the given threshold.", "execution_data": { "last_date_success_run_cell": "2023-05-17T13:56:11.674Z" }, "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" }, "time_period_in_days": { "constant": false, "value": "int(threshold_days)" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region", "title": "Region", "type": "string" }, "time_period_in_days": { "default": 30, "description": "(in days)\u00a0The threshold to filter the unused log strams.", "title": "Threshold (in days)", "type": "integer" } }, "required": [], "title": "aws_filter_unused_log_streams", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Filter Unused Log Stream", "orderProperties": 
[ "time_period_in_days", "region" ], "outputParams": { "output_name": "unused_log_streams", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not log_stream_name and not log_group_name", "tags": [ "aws_filter_unused_log_streams" ], "uuid": "53df09f034bd51da247c01b663d9e7c84d0ca615cfed4bfe2545547a5a4466be", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, List, Tuple\n", "from datetime import datetime, timedelta\n", "import botocore.config\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_unused_log_streams_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_filter_unused_log_streams(handle, region: str = \"\", time_period_in_days: int = 30) -> Tuple:\n", " \"\"\"aws_filter_unused_log_streams Returns an array of unused log strams for all log groups.\n", "\n", " :type region: string\n", " :param region: Used to filter the volume for specific region.\n", "\n", " :type time_period_in_days: int\n", " :param time_period_in_days: (in days)\u00a0The threshold to filter the unused log strams.\n", "\n", " :rtype: Array of unused log strams for all log groups.\n", " \"\"\"\n", " result = []\n", " now = datetime.utcnow()\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " start_time = now - timedelta(days=time_period_in_days)\n", " config = botocore.config.Config(retries={'max_attempts': 10})\n", " ec2Client = handle.client('logs', region_name=reg, 
config=config)\n", " response = aws_get_paginator(ec2Client, \"describe_log_groups\", \"logGroups\")\n", " for log_group in response:\n", " log_group_name = log_group['logGroupName']\n", " response1 = aws_get_paginator(ec2Client, \"describe_log_streams\", \"logStreams\",\n", " logGroupName=log_group_name,\n", " orderBy='LastEventTime',\n", " descending=True)\n", "\n", " for log_stream in response1:\n", " unused_log_streams = {}\n", " last_event_time = log_stream.get('lastEventTimestamp')\n", " if last_event_time is None:\n", " # The log stream has never logged an event\n", " unused_log_streams[\"log_group_name\"] = log_group_name\n", " unused_log_streams[\"log_stream_name\"] = log_stream['logStreamName']\n", " unused_log_streams[\"region\"] = reg\n", " result.append(unused_log_streams)\n", " elif datetime.fromtimestamp(last_event_time/1000.0) < start_time:\n", " # The log stream has not logged an event in the past given days\n", " unused_log_streams[\"log_group_name\"] = log_group_name\n", " unused_log_streams[\"log_stream_name\"] = log_stream['logStreamName']\n", " unused_log_streams[\"region\"] = reg\n", " result.append(unused_log_streams)\n", " except Exception as e:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"time_period_in_days\": \"int(threshold_days)\",\n", " \"region\": \"region\"\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not log_stream_name and not log_group_name\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"unused_log_streams\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_unused_log_streams, lego_printer=aws_filter_unused_log_streams_printer, hdl=hdl, args=args)" ] }, { "cell_type": 
"markdown", "id": "199591ef-cb3a-49b7-b515-3c6998050320", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "unused_log_streams
This action filters regions that have no unused log streams and creates a list of those that have them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "c153b29e-fc95-445a-9400-4a04c63315b3", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Unused Log Streams", "orderProperties": [], "tags": [], "title": "Create List of Unused Log Streams" }, "outputs": [], "source": [ "all_unused_log_streams = []\n", "try:\n", " if unused_log_streams[0] == False:\n", " if len(unused_log_streams[1])!=0:\n", " all_unused_log_streams=unused_log_streams[1]\n", "except Exception:\n", " for log_s in log_stream_name:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"log_group_name\"] = log_group_name\n", " data_dict[\"log_stream_name\"] = log_s\n", " all_unused_log_streams.append(data_dict)\n", "print(all_unused_log_streams)" ] }, { "cell_type": "markdown", "id": "cc3c4396-dcb7-482e-8835-bb918fca83fa", "metadata": { "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_unused_log_streams
This action deletes unused log streams found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "b90d92fe-69d9-4370-bec3-7b9b68e70169", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionOutputType": null, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "1fbb5c722fc8f70530e452566e341be44ecf4df4a62e4f2253508a1d47288745", "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "AWS Delete Log Stream", "id": 1, "index": 1, "inputData": [ { "log_group_name": { "constant": false, "value": "\"iter.get(\\\\\"log_group_name\\\\\")\"" }, "log_stream_name": { "constant": false, "value": "\"iter.get(\\\\\"log_stream_name\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "log_group_name": { "description": "Name of the log group.", "title": "Log Group Name", "type": "string" }, "log_stream_name": { "description": "Name of the log stream.", "title": "Log Stream Name", "type": "string" }, "region": { "description": "AWS Region", "title": "Region", "type": "string" } }, "required": [ "log_group_name", "log_stream_name", "region" ], "title": "aws_delete_log_stream", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "log_group_name": "log_group_name", "log_stream_name": "log_stream_name", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_unused_log_streams" } } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "metadata": { "action_bash_command": false, "action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS" ], "action_description": "AWS Delete Log Stream", "action_entry_function": "aws_delete_log_stream", "action_is_check": false, "action_is_remediation": false, "action_needs_credential": true, 
"action_next_hop": null, "action_next_hop_parameter_mapping": null, "action_nouns": null, "action_output_type": "ACTION_OUTPUT_TYPE_DICT", "action_supports_iteration": true, "action_supports_poll": true, "action_title": "AWS Delete Log Stream", "action_type": "LEGO_TYPE_AWS", "action_verbs": null, "action_version": "1.0.0" }, "name": "AWS Delete Log Stream", "orderProperties": [ "log_group_name", "log_stream_name", "region" ], "printOutput": true, "startcondition": "len(all_unused_log_streams)==0", "tags": [ "aws_delete_log_stream" ], "title": "AWS Delete Log Stream", "uuid": "1fbb5c722fc8f70530e452566e341be44ecf4df4a62e4f2253508a1d47288745", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_log_stream_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_delete_log_stream(handle, log_group_name: str, log_stream_name: str, region: str) -> Dict:\n", " \"\"\"aws_delete_log_stream Deletes a log stream.\n", "\n", " :type log_group_name: string\n", " :param log_group_name: Name of the log group.\n", "\n", " :type log_stream_name: string\n", " :param log_stream_name: Name of the log stream.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :rtype: Dict with the deleted log stream info.\n", " \"\"\"\n", " try:\n", " log_Client = handle.client('logs', region_name=region)\n", " response = log_Client.delete_log_stream(\n", " logGroupName=log_group_name,\n", " logStreamName=log_stream_name)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"log_group_name\": 
\"iter.get(\\\\\"log_group_name\\\\\")\",\n", " \"log_stream_name\": \"iter.get(\\\\\"log_stream_name\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_unused_log_streams\",\n", " \"iter_parameter\": [\"log_group_name\",\"log_stream_name\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_unused_log_streams)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_log_stream, lego_printer=aws_delete_log_stream_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "44a6cf05-385b-445d-a503-ad4aa607a568", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "all_unused_log_streams
In this Runbook, we were able to filter unused log streams before a given threshold number of days and delete them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete Unused AWS Log Streams", "parameters": [ "region", "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "log_group_name": { "description": "Log group name to get the log streams from.", "title": "log_group_name", "type": "string" }, "log_stream_name": { "description": "List of log streams to delete. Eg: [\"log_stream_1\", \"log_stream_2\"]", "title": "log_stream_name", "type": "array" }, "region": { "description": "AWS Region to get the log streams from. Eg: \"us-west-2\". If nothing is given all regions will be considered.", "title": "region", "type": "string" }, "threshold_days": { "default": "30", "description": "The threshold number of days to check the unused streams", "title": "threshold_days", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Delete_Unused_Log_Streams.json ================================================ { "name": "Delete Unused AWS Log Streams", "description": "Cloudwatch will retain empty Log Streams after the data retention time period. Those log streams should be deleted in order to save costs. 
This runbook can find unused log streams over a threshold number of days and help you delete them.", "uuid": "64b6e7809ddfb1094901da74924ca3386510a1cd", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Delete_Unused_NAT_Gateways.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "625dfbc1-d348-4423-97b8-df672384cdd1", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "1. AWS Find Unused NAT Gateways
2. AWS Delete NAT Gateway
Here we will use unSkript AWS Find Unused NAT Gateways action. This action filters all the NAT Gateways from the given region and returns a list of all the unused NAT Gateways. It will execute if the nat_gateway_ids parameter is not passed.
\n", "\n", "\n", "Input parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "1839b310-58d6-4746-85e7-5f136f74e237", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_NAT_GATEWAY", "CATEGORY_TYPE_AWS_EC2" ], "actionDescription": "This action to get all of the Nat gateways that have zero traffic over those", "actionEntryFunction": "aws_filter_unused_nat_gateway", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "f2b1eecf9b4f727ec80fc4d4f5c7915b788cafe969552af0a26f8db9747bbcd4" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Find Unused NAT Gateways", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "0f0c137beaf6a9246508393d1e868cea529d30a88631cd0f321799acbfbd47bb", "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "This action to get all of the Nat gateways that have zero traffic over those", "id": 1, "index": 1, "inputData": [ { "number_of_days": { "constant": false, "value": "int(number_of_days)" }, "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "number_of_days": { "description": "Number of days to check the Datapoints.", "title": "Number of Days", "type": "integer" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_filter_unused_nat_gateway", "type": "object" } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Find Unused NAT Gateways", "orderProperties": [ "region", "number_of_days" ], "outputParams": { "output_name": "unused_nat_gateways", "output_name_enabled": true, "output_runbook_enabled": false, 
"output_runbook_name": "" }, "printOutput": true, "startcondition": "not nat_gateway_ids", "tags": [ "aws_filter_unused_nat_gateway" ], "uuid": "0f0c137beaf6a9246508393d1e868cea529d30a88631cd0f321799acbfbd47bb", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from datetime import datetime, timedelta\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_unused_nat_gateway_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def is_nat_gateway_used(handle, nat_gateway, start_time, end_time,number_of_days):\n", " datapoints = []\n", " if nat_gateway['State'] != 'deleted':\n", " # Get the metrics data for the specified NAT Gateway over the last 7 days\n", " metrics_data = handle.get_metric_statistics(\n", " Namespace='AWS/NATGateway',\n", " MetricName='ActiveConnectionCount',\n", " Dimensions=[\n", " {\n", " 'Name': 'NatGatewayId',\n", " 'Value': nat_gateway['NatGatewayId']\n", " },\n", " ],\n", " StartTime=start_time,\n", " EndTime=end_time,\n", " Period=86400*number_of_days,\n", " Statistics=['Sum']\n", " )\n", " datapoints += metrics_data['Datapoints']\n", " if len(datapoints) == 0 or metrics_data['Datapoints'][0]['Sum']==0:\n", " return False\n", " else:\n", " return True\n", "\n", "\n", "@beartype\n", "def aws_filter_unused_nat_gateway(handle, number_of_days: int = 7, region: str = \"\") -> Tuple:\n", " \"\"\"aws_get_natgateway_by_vpc Returns an array of NAT gateways.\n", "\n", " :type region: string\n", " :param region: Region to filter NAT Gateways.\n", "\n", " :type number_of_days: int\n", " :param number_of_days: Number of days to check the Datapoints.\n", "\n", " :rtype: Array of NAT 
gateways.\n", " \"\"\"\n", " result = []\n", " end_time = datetime.utcnow()\n", " start_time = end_time - timedelta(days=number_of_days)\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " ec2Client = handle.client('ec2', region_name=reg)\n", " cloudwatch = handle.client('cloudwatch', region_name=reg)\n", " response = ec2Client.describe_nat_gateways()\n", " for nat_gateway in response['NatGateways']:\n", " nat_gateway_info = {}\n", " if not is_nat_gateway_used(cloudwatch, nat_gateway, start_time, end_time,number_of_days):\n", " nat_gateway_info[\"nat_gateway_id\"] = nat_gateway['NatGatewayId']\n", " nat_gateway_info[\"region\"] = reg\n", " result.append(nat_gateway_info)\n", " except Exception as e:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"number_of_days\": \"int(number_of_days)\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not nat_gateway_ids\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"unused_nat_gateways\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_unused_nat_gateway, lego_printer=aws_filter_unused_nat_gateway_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "c597d85b-9748-421b-a3fe-e6499fa167f4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "unused_nat_gateways
In this action, we modify the output from Step 1 and return a list of unused NAT gateways.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 24, "id": "2fdc0c0f-ea85-498a-88c1-04352631c8f8", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-27T10:19:08.248Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Modify Unused NAT Gateways Output", "orderProperties": [], "tags": [], "title": "Modify Unused NAT Gateways Output" }, "outputs": [], "source": [ "nat_gateways = []\n", "try:\n", " if unused_nat_gateways[0] == False:\n", " for nat in unused_nat_gateways[1]:\n", " nat_gateways.append(nat)\n", "except Exception as e:\n", " if nat_gateway_ids:\n", " for nat in nat_gateway_ids:\n", " nat_ids = {}\n", " nat_ids[\"nat_gateway_id\"] = nat\n", " nat_ids[\"region\"] = region\n", " nat_gateways.append(nat_ids)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "697f38a7-613c-4616-a37a-32b977f4faa0", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable: nat_gateways
\n", "
Here we will use the unSkript AWS Delete NAT Gateway action. In this action, we will pass the list of unused NAT Gateways from Step 1 and delete those NAT Gateways.
\n", "\n", "\n", "Input parameters:
\n", "nat_gateway_id,region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "24c2d14a-a543-4251-8243-d12c052f89b1", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS" ], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "c24c20b1d1d8a9f31ddbf6f2adf96cbd37df3a0fcf99e4a9a85b1f8b897ad8d4", "checkEnabled": false, "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "AWS Delete NAT Gateway", "id": 240, "index": 240, "inputData": [ { "nat_gateway_id": { "constant": false, "value": "\"iter.get(\\\\\"nat_gateway_id\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "nat_gateway_id": { "description": "ID of the NAT Gateway.", "title": "NAT Gateway ID", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [ "nat_gateway_id", "region" ], "title": "aws_delete_nat_gateway", "type": "object" } ], "isUnskript": false, "iterData": [ { "iter_enabled": true, "iter_item": { "nat_gateway_id": "nat_gateway_id", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "nat_gateways" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Delete NAT Gateway", "nouns": [], "orderProperties": [ "nat_gateway_id", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "delete_status", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "len(nat_gateways) != 0", "tags": [ 
"aws_delete_nat_gateway" ], "title": "AWS Delete NAT Gateway", "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_nat_gateway_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_delete_nat_gateway(handle, nat_gateway_id: str, region: str) -> Dict:\n", " \"\"\"aws_delete_nat_gateway Returns an dict of NAT gateways information.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type nat_gateway_id: string\n", " :param nat_gateway_id: ID of the NAT Gateway.\n", "\n", " :rtype: dict of NAT gateways information.\n", " \"\"\"\n", " try:\n", " ec2Client = handle.client('ec2', region_name=region)\n", " response = ec2Client.delete_nat_gateway(NatGatewayId=nat_gateway_id)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"nat_gateway_id\": \"iter.get(\\\\\"nat_gateway_id\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"nat_gateways\",\n", " \"iter_parameter\": [\"nat_gateway_id\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(nat_gateways) != 0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"delete_status\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_nat_gateway, lego_printer=aws_delete_nat_gateway_printer, hdl=hdl, 
args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "5954b863-9e8b-42f7-be29-5aa9afe3afd4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "Output variable:
\n", "delete_status
In this Runbook, we demonstrated the use of unSkript's AWS actions to filter unused NAT Gateways and delete those. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete Unused NAT Gateways", "parameters": [ "number_of_days", "nat_gateway_ids", "region" ] }, "kernelspec": { "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "definitions": null, "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "definitions": null, "properties": { "nat_gateway_ids": { "description": "NAT Gateways which needs to delete.", "title": "nat_gateway_ids", "type": "array" }, "number_of_days": { "default": 7, "description": "A number of days to check NAT gateways are not used.", "title": "number_of_days", "type": "number" }, "region": { "description": "AWS Region", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Delete_Unused_NAT_Gateways.json ================================================ { "name": "Delete Unused NAT Gateways", "description": "This runbook search for all unused NAT gateways from all the region and delete those gateways.", "uuid": "f2b1eecf9b4f727ec80fc4d4f5c7915b788cafe969552af0a26f8db9747bbcd4", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Delete_Unused_Route53_Healthchecks.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": 
"82eebdfd-c880-40df-bd6d-5b546c92164b", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "1) Get unused Route53 healthchecks
2) Delete the healthchecks
Using unSkript's Get Route53 Unused Healthchecks action, we will find the healthcheck IDs that are not being used by any record set to monitor their health.
\n", "\n", "\n", "This action takes the following parameters:
\n", "hosted_zone_id(Optional)
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "92ad5e21-d5ca-419a-b3ae-ba8d524da815", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ROUTE53" ], "actionDescription": "AWS get Unused Route53 Health Checks", "actionEntryFunction": "aws_get_unused_route53_health_checks", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS get Unused Route53 Health Checks", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "7bde6d48cf5e9b2b984335fb1434716a3dba113da0762bc70f57f4246b91df07", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "AWS get Unused Route53 Health Checks", "execution_data": { "last_date_success_run_cell": "2023-05-17T12:39:36.142Z" }, "id": 1, "index": 1, "inputData": [ { "hosted_zone_id": { "constant": false, "value": "hosted_zone_id" } } ], "inputschema": [ { "properties": { "hosted_zone_id": { "default": "", "description": "Used to filter the health checks for a specific hosted zone.", "title": "Hosted Zone ID", "type": "string" } }, "required": [], "title": "aws_get_unused_route53_health_checks", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS get Unused Route53 Health Checks", "orderProperties": [ "hosted_zone_id" ], "outputParams": { "output_name": "unused_health_checks", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not heath_check_ids", "tags": [ "aws_get_unused_route53_health_checks" ], 
"uuid": "7bde6d48cf5e9b2b984335fb1434716a3dba113da0762bc70f57f4246b91df07", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Tuple\n", "import pprint\n", "from unskript.connectors.aws import aws_get_paginator\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_unused_route53_health_checks_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_unused_route53_health_checks(handle, hosted_zone_id: str = \"\") -> Tuple:\n", " \"\"\"aws_get_unused_route53_health_checks Returns a list of unused Route 53 health checks.\n", "\n", " :type hosted_zone_id: string\n", " :param hosted_zone_id: Optional. Used to filter the health checks for a specific hosted zone.\n", "\n", " :rtype: A tuple containing a list of dicts with information about the unused health checks.\n", " \"\"\"\n", " result = []\n", " try:\n", " route_client = handle.client('route53')\n", " health_checks = aws_get_paginator(route_client, \"list_health_checks\", \"HealthChecks\")\n", " if hosted_zone_id:\n", " hosted_zones = [{'Id': hosted_zone_id}]\n", " else:\n", " hosted_zones = aws_get_paginator(route_client, \"list_hosted_zones\", \"HostedZones\")\n", " used_health_check_ids = set()\n", " for zone in hosted_zones:\n", " record_sets = aws_get_paginator(route_client, \"list_resource_record_sets\", \"ResourceRecordSets\", HostedZoneId=zone['Id'])\n", " for record_set in record_sets:\n", " if 'HealthCheckId' in record_set:\n", " used_health_check_ids.add(record_set['HealthCheckId'])\n", " for hc in health_checks:\n", " if hc['Id'] not in used_health_check_ids:\n", " result.append(hc['Id'])\n", " except Exception as e:\n", " raise e\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "\n", 
"task = Task(Workflow())\n", "task.configure(outputName=\"unused_health_checks\")\n", "\n", "task.configure(inputParamsJson='''{\n", " \"hosted_zone_id\": \"hosted_zone_id\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not health_check_ids\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_unused_route53_health_checks, lego_printer=aws_get_unused_route53_health_checks_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "a311041f-620a-4b6b-914f-e52c6c3a71f4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "unused_health_checks
This action filters the output from Step 1 to get the non-empty values.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "b85ce542-bdf0-44d2-9e75-213002d5c036", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-17T12:41:37.390Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Unused Healthchecks", "orderProperties": [], "tags": [], "title": "Create List of Unused Healthchecks" }, "outputs": [], "source": [ "all_unused_health_checks = []\n", "try:\n", " for res in unused_health_checks:\n", " if type(res)==bool:\n", " if res == False:\n", " continue\n", " elif type(res)==list:\n", " if len(res)!=0:\n", " all_unused_health_checks=res\n", "except Exception as e:\n", " all_unused_health_checks = health_check_ids\n", "print(all_unused_health_checks)" ] }, { "cell_type": "markdown", "id": "9fb3704a-9b19-49c4-96ab-a982217bbcd3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_unused_health_checks
This action deletes the Route53 healthcheck found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "c84fac99-c05b-4dee-9d8a-80bfdd7a3e60", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS" ], "actionDescription": "AWS Delete Route 53 HealthCheck", "actionEntryFunction": "aws_delete_route53_health_check", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Delete Route 53 HealthCheck", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "33e50f6c5813f3b01f4d63f7ec8d3eb363873c62f28d40d623acc9091c026270", "collapsed": true, "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "AWS Delete Route 53 HealthCheck", "execution_data": { "last_date_success_run_cell": "2023-04-21T12:40:14.522Z" }, "id": 1, "index": 1, "inputData": [ { "health_check_id": { "constant": false, "value": "iter_item" } } ], "inputschema": [ { "properties": { "health_check_id": { "description": "The ID of the Health Check to delete.", "title": "Health Check ID", "type": "string" } }, "required": [ "health_check_id" ], "title": "aws_delete_route53_health_check", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "health_check_id", "iter_list": { "constant": false, "objectItems": false, "value": "all_unused_health_checks" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Delete Route 53 HealthCheck", "orderProperties": [ "health_check_id" ], "printOutput": true, "startcondition": "len(all_unused_health_checks)!=0", "tags": [ "aws_delete_route53_health_check" ], "uuid": 
"33e50f6c5813f3b01f4d63f7ec8d3eb363873c62f28d40d623acc9091c026270", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_route53_health_check_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_delete_route53_health_check(handle, health_check_id: str) -> Dict:\n", " \"\"\"aws_delete_route53_health_check Deletes a Route 53 Health Check.\n", "\n", " :type health_check_id: string\n", " :param health_check_id: The ID of the Health Check to delete.\n", "\n", " :rtype: dict of health check information.\n", " \"\"\"\n", " try:\n", " route_client = handle.client('route53')\n", " response = route_client.delete_health_check(HealthCheckId=health_check_id)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"health_check_id\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_unused_health_checks\",\n", " \"iter_parameter\": \"health_check_id\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_unused_health_checks)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_route53_health_check, lego_printer=aws_delete_route53_health_check_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "9c7430c8-3660-45bd-90ef-9ceab77e3daa", "metadata": { "jupyter": { "source_hidden": false }, 
"name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "health_check_id
In this Runbook, we were able to delete unused healthcheck IDs, which will help in saving your AWS costs. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete Unused Route53 HealthChecks", "parameters": null }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "health_check_ids": { "description": "List of Route53 Health check IDs", "title": "health_check_ids", "type": "array" }, "hosted_zone_id": { "description": "The ID of the hosted zone that contains the resource record sets.", "title": "hosted_zone_id", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Delete_Unused_Route53_Healthchecks.json ================================================ { "name": "Delete Unused Route53 HealthChecks", "description": "When we associate healthchecks with an endpoint, Amazon Route53 sends health check requests to the endpoint IP address. These health checks validate that the endpoint IP addresses are operating as intended. 
There may be multiple reasons that healtchecks are lying usused for example- health check was mistakenly configured against your application by another customer, health check was configured from your account for testing purposes but wasn't deleted when testing was complete, health check was based on domain names and hence requests were sent due to DNS caching, Elastic Load Balancing service updated its public IP addresses due to scaling, and the IP addresses were reassigned to your load balancer, and many more. This runbook finds such healthchecks and deletes them to save AWS costs.", "uuid": "10a363abaf49098a0376eae46a6bfac421e606952369fc6ea02768ad319dd0be", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Detach_ec2_Instance_from_ASG.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "9a175295-d9f6-47f1-bab9-c4b9d6cdf375", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "1. Get Unhealthy instances from ASG
\n", "2. AWS Detach Instances From AutoScaling Group
" ] }, { "cell_type": "code", "execution_count": 6, "id": "d4246eb1-a222-4926-8d78-39ed59991674", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-18T09:12:04.823Z" }, "name": "Input Verification", "orderProperties": [], "tags": [], "title": "Input Verification" }, "outputs": [], "source": [ "if instance_ids and not region:\n", " raise SystemExit(\"Provide region for the instance!\")\n", "if region == None:\n", " region = \"\"" ] }, { "attachments": {}, "cell_type": "markdown", "id": "3125e39b-1f1a-4927-b0ad-8589898dce2e", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 A", "orderProperties": [], "tags": [], "title": "Step-1 A" }, "source": [ "Using unSkript's Get AWS AutoScaling Group Instances action we list all the EC2 instances for a given region with Auto Scaling Group name. This action only executes if the instance_id and region have been given as parameters.
\n", "instance_ids, regionasg_instanceHere we will use unSkript Get Unhealthy instances from ASG action. This action filters all the unhealthy instances from the Auto Scaling Group. It will execute if the instance_id parameter is not given.
\n", "\n", "Input parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "45ecf6d1-3a07-4e97-b8e7-a8b447e568a7", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ASG", "CATEGORY_TYPE_AWS_EC2" ], "actionDescription": "Get Unhealthy instances from Auto Scaling Group", "actionEntryFunction": "aws_filter_unhealthy_instances_from_asg", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "680ad9d119afab5f647e1afe7826b88d89bf35304954c3328e65a2fcf470f930" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Get Unhealthy instances from ASG", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "5de92ab7221455580796b1ebe93c61e3fec51d5dac22e907f96b6e0d7564e0ad", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Get Unhealthy instances from Auto Scaling Group", "execution_data": { "last_date_success_run_cell": "2023-05-18T09:30:18.292Z" }, "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region of the ASG.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_filter_unhealthy_instances_from_asg", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Get Unhealthy instances from ASG", "orderProperties": [ "region" ], "outputParams": { "output_name": "unhealthy_instance", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not instance_ids", 
"tags": [], "uuid": "5de92ab7221455580796b1ebe93c61e3fec51d5dac22e907f96b6e0d7564e0ad", "version": "1.0.0" }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_unhealthy_instances_from_asg_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_filter_unhealthy_instances_from_asg(handle, region: str = \"\") -> Tuple:\n", " \"\"\"aws_filter_unhealthy_instances_from_asg gives unhealthy instances from ASG\n", "\n", " :type region: string\n", " :param region: AWS region.\n", "\n", " :rtype: CheckOutput with status result and list of unhealthy instances from ASG.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " asg_client = handle.client('autoscaling', region_name=reg)\n", " response = aws_get_paginator(asg_client, \"describe_auto_scaling_instances\", \"AutoScalingInstances\")\n", "\n", " # filter instances to only include those that are in an \"unhealthy\" state\n", " for instance in response:\n", " data_dict = {}\n", " if instance['HealthStatus'] == 'Unhealthy':\n", " data_dict[\"InstanceId\"] = instance[\"InstanceId\"]\n", " data_dict[\"AutoScalingGroupName\"] = instance[\"AutoScalingGroupName\"]\n", " data_dict[\"region\"] = reg\n", " result.append(data_dict)\n", "\n", " except Exception as e:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", 
"task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not instance_ids\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"unhealthy_instance\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_unhealthy_instances_from_asg, lego_printer=aws_filter_unhealthy_instances_from_asg_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "32d0f938-ad56-453c-89be-52c139228017", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "unhealthy_instance
In this action, we modify the output from Step 1A and Step 1B to return a list of dictionary items for the unhealthy instances from ASG.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 12, "id": "e47022b7-ec19-4149-a7a7-3e2ebde54f87", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-08T13:23:56.168Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Modify Output", "orderProperties": [], "tags": [], "title": "Modify Output" }, "outputs": [], "source": [ "detach_instance_list = []\n", "try:\n", " if unhealthy_instance:\n", " if unhealthy_instance[0] == False:\n", " for instance in unhealthy_instance[1]:\n", " detach_instance_list.append(instance)\n", "except Exception as e:\n", " if instance_ids and asg_instance:\n", " for instance in asg_instance:\n", " detach_instance_list.append(instance)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "614ed424-9394-449e-9dc6-5547f765470a", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable: detach_instance_list
\n", "
In this action, we detach the AWS unhealthy instances from the Auto Scaling Group which we get from step 1.
\n", "\n", "\n", "Input parameters:
\n", "instance_ids, group_name, region
\n", "" ] }, { "cell_type": "code", "execution_count": 10, "id": "95603003-ac39-493a-af8a-f1910784a6f2", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "8e6e08f606d40e2f4481128d356cc67d30be72349074c513627b3f03a178cf6e", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Use This Action to AWS Detach Instances From AutoScaling Group", "id": 284, "index": 284, "inputData": [ { "group_name": { "constant": false, "value": "\"iter.get(\\\\\"AutoScalingGroupName\\\\\")\"" }, "instance_ids": { "constant": false, "value": "\"iter.get(\\\\\"InstanceId\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "group_name": { "description": "Name of AutoScaling Group.", "title": "Group Name", "type": "string" }, "instance_ids": { "description": "List of instances.", "title": "Instance IDs", "type": "string" }, "region": { "description": "AWS Region of autoscaling group.", "title": "Region", "type": "string" } }, "required": [ "instance_ids", "group_name", "region" ], "title": "aws_detach_autoscaling_instances", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "group_name": "AutoScalingGroupName", "instance_ids": "InstanceId", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "detach_instance_list" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Detach Instances From AutoScaling Group", "nouns": [], "orderProperties": [ "instance_ids", "group_name", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "detach_output", "output_name_enabled": true }, "printOutput": true, "startcondition": 
"len(detach_instance_list)>0", "tags": [ "aws_detach_autoscaling_instances" ], "title": "AWS Detach Instances From AutoScaling Group", "verbs": [] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import List, Dict\n", "from pydantic import BaseModel, Field\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_detach_autoscaling_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_detach_autoscaling_instances(\n", " handle,\n", " instance_ids: str,\n", " group_name: str,\n", " region: str\n", ") -> Dict:\n", " \"\"\"aws_detach_autoscaling_instances detach instances from autoscaling group.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type instance_ids: string\n", " :param instance_ids: Name of instances.\n", "\n", " :type group_name: string\n", " :param group_name: Name of AutoScaling Group.\n", "\n", " :type region: string\n", " :param region: AWS Region of autoscaling group.\n", "\n", " :rtype: Dict with the detach instance info.\n", " \"\"\"\n", "\n", " ec2Client = handle.client(\"autoscaling\", region_name=region)\n", " result = {}\n", " try:\n", " response = ec2Client.detach_instances(\n", " InstanceIds=[instance_ids],\n", " AutoScalingGroupName=group_name,\n", " ShouldDecrementDesiredCapacity=True\n", " )\n", " result = response\n", " except Exception as error:\n", " result[\"error\"] = error\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"group_name\": \"iter.get(\\\\\"AutoScalingGroupName\\\\\")\",\n", " \"instance_ids\": \"iter.get(\\\\\"InstanceId\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " 
\"iter_list_is_const\": false,\n", " \"iter_list\": \"detach_instance_list\",\n", " \"iter_parameter\": [\"instance_ids\",\"group_name\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(detach_instance_list)>0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"detach_output\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_detach_autoscaling_instances, lego_printer=aws_detach_autoscaling_instances_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "346d8d07-6708-4663-bf8c-5d17c8b6506f", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "Output variable:
\n", "detach_output
In this Runbook, we demonstrated the use of unSkript's AWS actions. This runbook helps to detach the instances from the Auto Scaling Group. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "AWS Detach EC2 Instance from ASG", "parameters": [ "region", "asg_name", "instance_id" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "asg_name": { "description": "Auto Scaling Group Name. Note: if ASG name is given no need to give region.", "title": "asg_name", "type": "string" }, "instance_ids": { "description": "Instance Ids that are attached to Auto Scaling Group. Note: if instance id is given then the region is mandatory.", "title": "instance_ids", "type": "array" }, "region": { "description": "AWS region e.g.us-west-2", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Detach_ec2_Instance_from_ASG.json ================================================ { "name": "AWS Detach EC2 Instance from ASG", "description": "This runbook can be used to detach an instance from Auto Scaling Group. You can remove (detach) an instance that is in the InService state from an Auto Scaling group. After the instance is detached, you can manage it independently from the rest of the Auto Scaling group. By detaching an instance, you can move an instance out of one Auto Scaling group and attach it to a different group. 
For more information, see Attach EC2 instances to your Auto Scaling group.", "uuid": "680ad9d119afab5f647e1afe7826b88d89bf35304954c3328e65a2fcf470f930", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_EC2_Disk_Cleanup.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "bf364c84", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "Here we will use unSkript Get AWS Instance Details action. This action is used to find out all the details of the EC2 instance.
\n", "\n", "\n", "Input parameters:
\n", "instance_id, region
\n", "" ] }, { "cell_type": "code", "execution_count": 3, "id": "637d4299-e731-47f1-8bef-f0ea061ea1c3", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "aa1e026ca8002b906315feba401e5c46889d459270adce3b65d480dc9530311f", "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Use This Action to Get Details about an AWS EC2 Instance", "execution_data": { "last_date_success_run_cell": "2023-02-17T18:53:16.952Z" }, "id": 103, "index": 103, "inputData": [ { "instance_id": { "constant": false, "value": "instance_id" }, "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "instance_id": { "description": "ID of the instance.", "title": "Instance Id", "type": "string" }, "region": { "description": "AWS Region of the instance.", "title": "Region", "type": "string" } }, "required": [ "instance_id", "region" ], "title": "aws_get_instance_details", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Get AWS Instance Details: Find SSH IP", "nouns": [ "instance", "details" ], "orderProperties": [ "instance_id", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "InstanceDetails", "output_name_enabled": true }, "printOutput": true, "tags": [ "aws_get_instance_details" ], "title": "Get AWS Instance Details: Find SSH IP", "verbs": [ "get" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_instance_details(\n", " handle,\n", " instance_id: str,\n", " region: str,\n", "):\n", "\n", " ec2client = handle.client('ec2', region_name=region)\n", " instances = []\n", " response = ec2client.describe_instances(\n", " 
Filters=[{\"Name\": \"instance-id\", \"Values\": [instance_id]}])\n", " for reservation in response[\"Reservations\"]:\n", " for instance in reservation[\"Instances\"]:\n", " instances.append(instance)\n", "\n", " return instances[0]\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"instance_id\": \"instance_id\",\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(outputName=\"InstanceDetails\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.output = task.execute(aws_get_instance_details, hdl=hdl, args=args)\n", " if task.output_name != None:\n", " globals().update({task.output_name: task.output[0]})\n", "\n", "if hasattr(task, 'output'):\n", " if isinstance(task.output, (list, tuple)):\n", " for item in task.output:\n", " print(f'item: {item}')\n", " elif isinstance(task.output, dict):\n", " for item in task.output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(task.output)\n", " w.tasks[task.name]= task.output\n", " ssh_ip = InstanceDetails[\"PrivateIpAddress\"]" ] }, { "cell_type": "markdown", "id": "8a4c02ff", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable:
\n", "InstanceDetails
Here we will use unSkript SSH: Locate large files on host action. This action is used to scan the file system on a given host and returns a dict of large files. The command used to perform the scan is \"find inspect_folder -type f -exec du -sk '{}' + | sort -rh | head -n count.
\n", "\n", "\n", "Input parameters:
\n", "host, inspect_folder, threshold, sudo, count
\n", "\n", "Output variable:
\n", "FileLocation
" ] }, { "cell_type": "code", "execution_count": 30, "id": "ac0a3f1d-6177-4987-a506-af53d4b48cec", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "f3bb79ce49da7d739d31e66c86308c97e481f41275e2bcdaabfc694fa97f9d02", "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "This action scans the file system on a given host and returns a dict of large files. The command used to perform the scan is \"find inspect_folder -type f -exec du -sk '{}' + | sort -rh | head -n count\"", "execution_data": { "last_date_success_run_cell": "2023-02-17T19:17:01.447Z" }, "id": 60, "index": 60, "inputData": [ { "count": { "constant": false, "value": "10" }, "host": { "constant": false, "value": "ssh_ip" }, "inspect_folder": { "constant": false, "value": "dirs_to_anaylze" }, "sudo": { "constant": true, "value": false }, "threshold": { "constant": false, "value": "int(Threshold)" } } ], "inputschema": [ { "properties": { "count": { "default": 10, "description": "Number of files to report from the scan. Default is 10", "title": "Count", "type": "integer" }, "host": { "description": "Host to connect to. Eg 10.10.10.10", "title": "Host", "type": "string" }, "inspect_folder": { "description": "Folder to inspect on the remote host. Folders are scanned using \"find inspect_folder -type f -exec du -sk '{}' + | sort -rh | head -n count\"", "title": "Inspect Folder", "type": "string" }, "sudo": { "default": false, "description": "Run the scan with sudo.", "title": "Run with sudo", "type": "boolean" }, "threshold": { "default": 100, "description": "Threshold the files on given size. Specified in Mb. 
Default is 100Mb", "title": "Size Threshold", "type": "integer" } }, "required": [ "host", "inspect_folder" ], "title": "ssh_find_large_files", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SSH", "name": "SSH: Execute Remote Command: Locate large files with du", "nouns": [ "ssh", "files" ], "orderProperties": [ "host", "inspect_folder", "threshold", "count", "sudo" ], "output": { "type": "" }, "outputParams": { "output_name": "FileLocation", "output_name_enabled": true }, "printOutput": true, "tags": [ "ssh_find_large_files" ], "title": "SSH: Execute Remote Command: Locate large files with du", "verbs": [ "find", "locate" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import json\n", "import tempfile\n", "import os\n", "from pydantic import BaseModel, Field\n", "from pssh.clients import ParallelSSHClient\n", "from typing import List, Optional\n", "from unskript.connectors import ssh\n", "\n", "from unskript.fwk.cellparams import CellParams\n", "from unskript import connectors\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def ssh_find_large_files(\n", " sshClient,\n", " host: str,\n", " inspect_folder: str,\n", " threshold: int = 0,\n", " sudo: bool = False,\n", " count: int = 10) -> dict:\n", " print(sshClient)\n", "\n", " client = sshClient([host], None)\n", "\n", " # find size in Kb\n", " command = \"find \" + inspect_folder + \\\n", " \" -type f -exec du -sm '{}' + | sort -rh | head -n \" + str(count)\n", " runCommandOutput = client.run_command(command=command, sudo=sudo)\n", " client.join()\n", " res = {}\n", "\n", " for host_output in runCommandOutput:\n", " for line in host_output.stdout:\n", " # line is of the form {size} {fullfilename}\n", " (size, filename) = line.split()\n", " if int(size) > threshold:\n", " res[filename] = int(size)\n", "\n", " return res\n", "\n", "\n", "task = Task(Workflow())\n", 
"task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"count\": \"10\",\n", " \"host\": \"ssh_ip\",\n", " \"inspect_folder\": \"dirs_to_anaylze\",\n", " \"sudo\": \"False\",\n", " \"threshold\": \"int(Threshold)\"\n", " }''')\n", "task.configure(outputName=\"FileLocation\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.output = task.execute(ssh_find_large_files, hdl=hdl, args=args)\n", " if task.output_name != None:\n", " globals().update({task.output_name: task.output[0]})\n", "\n", "if hasattr(task, 'output'):\n", " if isinstance(task.output, (list, tuple)):\n", " for item in task.output:\n", " print(f'item: {item}')\n", " elif isinstance(task.output, dict):\n", " for item in task.output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(task.output)\n", " w.tasks[task.name]= task.output" ] }, { "cell_type": "markdown", "id": "c646e375-e064-48d1-b101-ecd74bec93e1", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2 Extension", "orderProperties": [], "tags": [], "title": "Step-2 Extension" }, "source": [ "
This action takes data from step 2 and sorts the output to get the remote files and local files.
" ] }, { "cell_type": "code", "execution_count": 8, "id": "492dfae5-dfe1-47b9-be70-51ff64029166", "metadata": { "actionNeedsCredential": false, "actionSupportsIteration": false, "actionSupportsPoll": false, "credentialsJson": {}, "jupyter": { "source_hidden": true }, "name": "Custom Step: Create local filenames from remote filenames", "orderProperties": [], "tags": [], "title": "Custom Step: Create local filenames from remote filenames" }, "outputs": [], "source": [ "remote_files = [x for x in FileLocation.keys()]\n", "if len(remote_files) == 0:\n", " print(\"No files to process, exiting\")\n", " if hasattr(Workflow(), \"Done\"):\n", " Workflow().Done()\n", "\n", "local_files = [ \"/tmp/\" + x.lstrip(\"/\").replace(\"/\", \"_\") for x in remote_files ]\n", "mapping = []\n", "for i in range(len(remote_files)):\n", " mapping.append( {'remote': remote_files[i], 'local': local_files[i]} )\n", "print(json.dumps(mapping, indent=2))\n" ] }, { "cell_type": "markdown", "id": "3349f56f", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-3", "orderProperties": [], "tags": [], "title": "Step-3" }, "source": [ "Here we will use unSkript SCP: Remote file transfer over SSH action. This action is used to Copy files from or to the remote host. Files are copied over SCP.
\n", "\n", "\n", "Input parameters:
\n", "host, remote_file, local_file, direction
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "2a5a1b76-4385-49c1-b558-95216c34ccc4", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "a3b8cad557699dfa01f15274d81941252f965f7a2a409ac89b844db74f44e4c5", "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Copy files from or to remote host. Files are copied over SCP. ", "id": 59, "index": 59, "inputData": [ { "direction": { "constant": true, "value": true }, "host": { "constant": false, "value": "ssh_ip" }, "local_file": { "constant": false, "value": "iter.get(\"local\")" }, "remote_file": { "constant": false, "value": "iter.get(\"remote\")" } } ], "inputschema": [ { "properties": { "direction": { "default": true, "description": "Direction of the copy operation. Default is receive-from-remote-server", "title": "Receive", "type": "boolean" }, "host": { "description": "Hosts to connect to. For eg. \"10.10.10.10\"", "title": "Host", "type": "string" }, "local_file": { "description": "Filename on the unSkript proxy. Eg /tmp/my_local_file", "title": "Local File", "type": "string" }, "remote_file": { "description": "Filename on the remote server. 
Eg /home/ec2-user/my_remote_file", "title": "Remote File", "type": "string" } }, "required": [ "host", "remote_file", "local_file" ], "title": "ssh_scp", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SSH", "name": "SCP: Remote file transfer over SSH", "nouns": [ "ssh", "file" ], "orderProperties": [ "host", "remote_file", "local_file", "direction" ], "output": { "type": "" }, "outputParams": { "output_name": "transfer_files", "output_name_enabled": true }, "printOutput": true, "tags": [ "ssh_scp" ], "title": "SCP: Remote file transfer over SSH", "verbs": [ "copy", "transfer", "scp" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "from pydantic import BaseModel, Field\n", "from gevent import joinall\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def ssh_scp(\n", " sshClient,\n", " host: str,\n", " remote_file: str,\n", " local_file: str,\n", " direction: bool = True) -> bool:\n", "\n", " client = sshClient([host], None)\n", " copy_args = [{'local_file': local_file, 'remote_file': remote_file}]\n", "\n", " if direction is True:\n", " cmds = client.copy_remote_file(remote_file=remote_file, local_file=local_file,\n", " recurse=False,\n", " suffix_separator=\"\", copy_args=copy_args,\n", " encoding='utf-8')\n", "\n", " else:\n", " cmds = client.copy_file(local_file=local_file, remote_file=remote_file,\n", " recurse=False, copy_args=None)\n", "\n", " try:\n", " joinall(cmds, raise_error=True)\n", " if direction is True:\n", " print(f\"Successfully copied file {host}://{remote_file} to {local_file}\")\n", " else:\n", " print(f\"Successfully copied file {local_file} to {host}://{remote_file}\")\n", "\n", " except Exception as e:\n", " print(f\"Error encountered while copying files {e}\")\n", " return False\n", "\n", " return True\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", 
"task.configure(inputParamsJson='''{\n", " \"direction\": \"True\",\n", " \"host\": \"ssh_ip\",\n", " \"local_file\": \"iter.get(\\\\\"local\\\\\")\",\n", " \"remote_file\": \"iter.get(\\\\\"remote\\\\\")\"\n", " }''')\n", "task.configure(outputName=\"transfer_files\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.output = task.execute(ssh_scp, hdl=hdl, args=args)\n", " if task.output_name != None:\n", " globals().update({task.output_name: task.output[0]})\n", "\n", "if hasattr(task, 'output'):\n", " if isinstance(task.output, (list, tuple)):\n", " for item in task.output:\n", " print(f'item: {item}')\n", " elif isinstance(task.output, dict):\n", " for item in task.output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(task.output)\n", " w.tasks[task.name]= task.output" ] }, { "cell_type": "markdown", "id": "029a4c00", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-4", "orderProperties": [], "tags": [], "title": "Step-4" }, "source": [ "Output variable:
\n", "transfer_files
Here we will use the unSkript Upload file to S3 action. This action is used to Upload a local file to an S3 bucket.
\n", "\n", "\n", "Input parameters:
\n", "bucketName, file, prefix
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "443c623b-e0df-4868-a013-af4d028f3f2c", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "c7ddfac1e75c2ec65ec4f1bc6d38c4cecc2ad08b19169da94466b49f04ced368", "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Upload a local file to S3", "id": 126, "index": 126, "inputData": [ { "bucketName": { "constant": false, "value": "Bucket" }, "file": { "constant": false, "value": "iter.get(\"local\")" }, "prefix": { "constant": false, "value": "prefix or f\"{instance_id}/{str(datetime.date.today())}/\"" } } ], "inputschema": [ { "properties": { "bucketName": { "description": "Name of the bucket to upload into.", "title": "Bucket", "type": "string" }, "file": { "description": "Name of the local file to upload into bucket. Eg /tmp/file-to-upload", "title": "File", "type": "string" }, "prefix": { "default": "", "description": "Prefix to attach to get the final object name to be used in the bucket.", "title": "Prefix", "type": "string" } }, "required": [ "bucketName", "file" ], "title": "aws_upload_file_to_s3", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "", "iter_list": { "constant": false, "objectItems": false, "value": "mapping" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Upload file to S3", "nouns": [ "aws", "bucket", "file" ], "orderProperties": [ "bucketName", "file", "prefix" ], "output": { "type": "" }, "outputParams": { "output_name": "upload_output", "output_name_enabled": true }, "printOutput": true, "tags": [ "aws_upload_file_to_s3" ], "title": "Upload file to S3", "verbs": [ "put", "upload" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "\n", "from pydantic import 
BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_upload_file_to_s3(handle, bucketName: str, file: str, prefix: str = \"\"):\n", "\n", " s3 = handle.client('s3')\n", " objName = prefix + file.split(\"/\")[-1]\n", " try:\n", " with open(file, \"rb\") as f:\n", " s3.upload_fileobj(f, bucketName, objName)\n", " except Exception as e:\n", " print(f\"Error: {e}\")\n", " raise e\n", "\n", " print(f\"Successfully copied {file} to bucket:{bucketName} object:{objName}\")\n", " return None\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"bucketName\": \"Bucket\",\n", " \"file\": \"iter.get(\\\\\"local\\\\\")\",\n", " \"prefix\": \"prefix or f\\\\\"{instance_id}/{str(datetime.date.today())}/\\\\\"\"\n", " }''')\n", "\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"mapping\",\n", " \"iter_parameter\": \"\"\n", " }''')\n", "task.configure(outputName=\"upload_output\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.output = task.execute(aws_upload_file_to_s3, hdl=hdl, args=args)\n", " if task.output_name != None:\n", " globals().update({task.output_name: task.output[0]})\n", "\n", "if hasattr(task, 'output'):\n", " if isinstance(task.output, (list, tuple)):\n", " for item in task.output:\n", " print(f'item: {item}')\n", " elif isinstance(task.output, dict):\n", " for item in task.output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(task.output)\n", " w.tasks[task.name]= task.output" ] }, { "cell_type": "markdown", "id": "f8431944", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-5", "orderProperties": [], "tags": [], "title": "Step-5" }, "source": [ "Output variable:
\n", "upload_output
Here we will use unSkript SSH Execute Remote Command action. This action is used to SSH Execute Remote Commands to remove files.
\n", "\n", "\n", "Input parameters:
\n", "hosts, command, sudo
\n", "\n", "Output variable:
\n", "remove_output
" ] }, { "cell_type": "code", "execution_count": null, "id": "eed84b79-c7db-4950-a5e2-5ec66eb72cea", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "5279b2046bb2eb4a691ba748086f4af9e580a849faae557694bb12a8c2b7b379", "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "SSH Execute Remote Command", "id": 58, "index": 58, "inputData": [ { "command": { "constant": false, "value": "\"rm -v \" + \" \".join(remote_files)" }, "hosts": { "constant": false, "value": "[ ssh_ip ]" }, "sudo": { "constant": true, "value": false } } ], "inputschema": [ { "properties": { "command": { "description": "Command to be executed on the remote server.", "title": "Command", "type": "string" }, "hosts": { "description": "List of hosts to connect to. For eg. [\"host1\", \"host2\"].", "items": { "type": "string" }, "title": "Hosts", "type": "array" }, "sudo": { "default": false, "description": "Run the command with sudo.", "title": "Run with sudo", "type": "boolean" } }, "required": [ "hosts", "command" ], "title": "ssh_execute_remote_command", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SSH", "name": "SSH Execute Remote Command: Remove Files", "nouns": [ "ssh", "command" ], "orderProperties": [ "hosts", "command", "sudo" ], "output": { "type": "" }, "outputParams": { "output_name": "remove_output", "output_name_enabled": true }, "printOutput": true, "tags": [ "ssh_execute_remote_command" ], "title": "SSH Execute Remote Command: Remove Files", "verbs": [ "execute" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import json\n", "import tempfile\n", "import os\n", "from pydantic import BaseModel, Field\n", "from pssh.clients import ParallelSSHClient\n", "from typing import List, Optional\n", "from 
unskript.connectors import ssh\n", "\n", "from unskript.legos.cellparams import CellParams\n", "from unskript import connectors\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def ssh_execute_remote_command(sshClient, hosts: List[str], command: str, sudo: bool = False):\n", "\n", " client = sshClient(hosts, None)\n", " runCommandOutput = client.run_command(command=command, sudo=sudo)\n", " client.join()\n", " res = {}\n", "\n", " for host_output in runCommandOutput:\n", " hostname = host_output.host\n", " output = []\n", " for line in host_output.stdout:\n", " output.append(line)\n", " res[hostname] = output\n", "\n", " o = \"\\n\".join(output)\n", " print(f\"Output from host {hostname}\\n{o}\\n\")\n", "\n", " return res\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"command\": \"\\\\\"rm -v \\\\\" + \\\\\" \\\\\".join(remote_files)\",\n", " \"hosts\": \"[ ssh_ip ]\",\n", " \"sudo\": \"False\"\n", " }''')\n", "task.configure(outputName=\"remove_output\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.output = task.execute(ssh_execute_remote_command, hdl=hdl, args=args)\n", " if task.output_name != None:\n", " globals().update({task.output_name: task.output[0]})\n", "\n", "if hasattr(task, 'output'):\n", " if isinstance(task.output, (list, tuple)):\n", " for item in task.output:\n", " print(f'item: {item}')\n", " elif isinstance(task.output, dict):\n", " for item in task.output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(task.output)\n", " w.tasks[task.name]= task.output" ] }, { "cell_type": "markdown", "id": "ea669eec-12ae-4097-aaf6-22280d7d2f8b", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-5 Extension", "orderProperties": [], "tags": [], "title": "Step-5 Extension" }, "source": [ "
This action is an extension of Step 5 where we will clean up the files locally.
" ] }, { "cell_type": "code", "execution_count": 8, "id": "cfdd871c-1713-49fd-973e-ca852993354b", "metadata": { "actionNeedsCredential": false, "actionSupportsIteration": false, "actionSupportsPoll": false, "credentialsJson": {}, "jupyter": { "source_hidden": true }, "name": "Clean up local files", "orderProperties": [], "tags": [], "title": "Clean up local files" }, "outputs": [], "source": [ "from subprocess import PIPE, run\n", "\n", "o = run(f\"rm -fv {' '.join(local_files)}\", stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)\n", "print(o.stdout)" ] }, { "cell_type": "markdown", "id": "785e11cd", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-6", "orderProperties": [], "tags": [], "title": "Step-6" }, "source": [ "Here we will use unSkript Post Slack Message action. This action is used to post the message to the slack channel.
\n", "\n", "\n", "Input parameters:
\n", "channel, message
\n", "\n", "Output variable:
\n", "slack_status
" ] }, { "cell_type": "code", "execution_count": null, "id": "b3f91610-73fc-4f57-93db-203fe91aa4cb", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "6a87f83ab0ecfeecb9c98d084e2b1066c26fa64be5b4928d5573a5d60299802d", "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Post Slack Message", "id": 44, "index": 44, "inputData": [ { "channel": { "constant": false, "value": "channel" }, "message": { "constant": false, "value": "f\"Deleted {len(remote_files)} files from host {ssh_ip}\"" } } ], "inputschema": [ { "properties": { "channel": { "description": "Name of the slack channel where the message to be posted", "title": "Channel", "type": "string" }, "message": { "description": "Message to be sent", "title": "Message", "type": "string" } }, "required": [ "channel", "message" ], "title": "slack_post_message", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SLACK", "name": "Post Slack Message", "nouns": [ "slack", "message" ], "orderProperties": [ "channel", "message" ], "output": { "type": "" }, "outputParams": { "output_name": "slack_status", "output_name_enabled": true }, "printOutput": true, "tags": [ "slack_post_message" ], "title": "Post Slack Message", "verbs": [ "post" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "\n", "from pydantic import BaseModel, Field\n", "from slack_sdk import WebClient\n", "from slack_sdk.errors import SlackApiError\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "\n", "from beartype import beartype\n", "def legoPrinter(func):\n", " def Printer(*args, **kwargs):\n", " output = func(*args, **kwargs)\n", " if output:\n", " channel = kwargs[\"channel\"]\n", " pp.pprint(print(f\"Message 
sent to Slack channel {channel}\"))\n", " return output\n", " return Printer\n", "\n", "\n", "@legoPrinter\n", "@beartype\n", "def slack_post_message(\n", " handle: WebClient,\n", " channel: str,\n", " message: str) -> bool:\n", "\n", " try:\n", " response = handle.chat_postMessage(\n", " channel=channel,\n", " text=message)\n", " return True\n", " except SlackApiError as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.response['error']}\")\n", " return False\n", " except Exception as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.__str__()}\")\n", " return False\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"channel\": \"channel\",\n", " \"message\": \"f\\\\\"Deleted {len(remote_files)} files from host {ssh_ip}\\\\\"\"\n", " }''')\n", "task.configure(outputName=\"slack_status\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.output = task.execute(slack_post_message, hdl=hdl, args=args)\n", " if task.output_name != None:\n", " globals().update({task.output_name: task.output[0]})" ] }, { "cell_type": "markdown", "id": "1006351c", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "
In this Runbook, we demonstrated the use of unSkript's AWS and SSH legos to perform AWS and SSH actions. This runbook locates large files in a given path inside an EC2 instance and backs them up into a given S3 bucket. Afterwards, it deletes the backed-up files and sends a message on Slack. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "AWS EC2 Disk Cleanup", "parameters": [ "instance_id", "prefix", "region", "Bucket", "Threshold", "dirs_to_anaylze" ] }, "kernelspec": { "display_name": "unSkript (Build: 1105)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "Bucket": { "description": "S3 Bucket for archiving", "title": "Bucket", "type": "string" }, "Threshold": { "default": 100, "description": "Threshold on file size (in Mb)", "title": "Threshold", "type": "number" }, "channel": { "description": "Slack channel to send messages.", "title": "channel", "type": "string" }, "dirs_to_anaylze": { "default": "/home", "description": "Root for directories to be analyzed for large files", "title": "dirs_to_anaylze", "type": "string" }, "instance_id": { "description": "EC2 Instance", "title": "instance_id", "type": "string" }, "prefix": { "default": "test/", "description": "Prefix to use while uploading to S3 (default:1. Get AWS ALB Listeners Without HTTP Redirection.
2. AWS Modify ALB Listeners HTTP Redirection.
Here we will use unSkript Get AWS ALB Listeners Without HTTP Redirection action. In this action, we will check for listener configuration for HTTP redirection and return a list of listener ARNs that don't have HTTP redirection.
\n", "\n", "\n", "Input parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "9d6c0f6e-13e1-4269-9205-87e87f891432", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ELB" ], "actionDescription": "Get AWS ALB Listeners Without HTTP Redirection", "actionEntryFunction": "aws_get_alb_listeners_without_http_redirect", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "7d87da036fb983f7909a22a01529790dddc5179ebbb8f95517a66314d236555c" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Get AWS ALB Listeners Without HTTP Redirection", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "e84fa689b445924888abced31fe69f0edfcad2ea9135f175ce1897d86f04e6cd", "condition_enabled": true, "continueOnError": false, "description": "Get AWS ALB Listeners Without HTTP Redirection", "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region of the ALB listeners.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_get_alb_listeners_without_http_redirect", "type": "object" } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Get AWS ALB Listeners Without HTTP Redirection", "orderProperties": [ "region" ], "outputParams": { "output_name": "listener_arns", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not alb_listener_arns", "tags": [ "aws_get_alb_listeners_without_http_redirect" ], "uuid": "e84fa689b445924888abced31fe69f0edfcad2ea9135f175ce1897d86f04e6cd", "version": "1.0.0", 
"credentialsJson": {} }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from unskript.legos.aws.aws_list_application_loadbalancers.aws_list_application_loadbalancers import aws_list_application_loadbalancers\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_alb_listeners_without_http_redirect_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_alb_listeners_without_http_redirect(handle, region: str = \"\") -> Tuple:\n", " \"\"\"aws_get_alb_listeners_without_http_redirect List of ALB listeners without HTTP redirection.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: Region to filter ALB listeners.\n", "\n", " :rtype: Tuple of status result and list of ALB listeners without HTTP redirection.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " alb_list = []\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " alb_dict = {}\n", " loadbalancer_arn = aws_list_application_loadbalancers(handle, reg)\n", " alb_dict[\"region\"] = reg\n", " alb_dict[\"alb_arn\"] = loadbalancer_arn\n", " alb_list.append(alb_dict)\n", " except Exception as error:\n", " pass\n", "\n", " for alb in alb_list:\n", " try:\n", " ec2Client = handle.client('elbv2', region_name=alb[\"region\"])\n", " for load in alb[\"alb_arn\"]:\n", " response = aws_get_paginator(ec2Client, \"describe_listeners\", \"Listeners\",\n", " LoadBalancerArn=load)\n", " for listner in response:\n", " if 'SslPolicy' not in 
listner:\n", " resp = aws_get_paginator(ec2Client, \"describe_rules\", \"Rules\",\n", " ListenerArn=listner['ListenerArn'])\n", " for rule in resp:\n", " for action in rule['Actions']:\n", " listener_dict = {}\n", " if action['Type'] != 'redirect':\n", " listener_dict[\"region\"] = alb[\"region\"]\n", " listener_dict[\"listener_arn\"] = listner['ListenerArn']\n", " result.append(listener_dict)\n", " except Exception as error:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not alb_listener_arns\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"listener_arns\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_alb_listeners_without_http_redirect, lego_printer=aws_get_alb_listeners_without_http_redirect_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "75375134-5683-43a5-b814-b37326b2daab", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "listener_arns
In this action, we modify the output from step 1 and return a list of dictionary items for the listener ARNs.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 22, "id": "e8d7cf3f-c738-4ed2-b735-08464a6cb712", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-01-30T15:00:54.938Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Modify Listeners ARNs Output", "orderProperties": [], "tags": [], "title": "Modify Listeners ARNs Output" }, "outputs": [], "source": [ "import re \n", "import json\n", "from unskript.legos.utils import parseARN\n", "\n", "\n", "arns_list = []\n", "try:\n", " if listener_arns[0] == False:\n", " for listener in listener_arns[1]:\n", " arns_list.append(listener)\n", "except Exception as e:\n", " if alb_listener_arns:\n", " for i in alb_listener_arns:\n", " arn_dict = {}\n", " parsedArn = parseARN(i)\n", " arn_dict[\"region\"] = parsedArn[\"region\"]\n", " arn_dict[\"listener_arn\"] = i\n", " arns_list.append(arn_dict)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "03516737-23d3-45cc-bcff-927d74635c82", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable:
\n", "arns_list
Here we will use the unSkript AWS Modify ALB Listeners HTTP Redirection action. In this action, we will modify the listener's configuration for HTTP redirection for each listener obtained from the previous step. This action only executes when len(arns_list)>0.
\n", "\n", "\n", "Input parameters:
\n", "listener_arn,region
\n", "" ] }, { "cell_type": "code", "execution_count": 6, "id": "e6461e22-733d-4665-8e51-5e6d755c0c82", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "f0e5d5038aad3efc10cd1cc79b27571c08d672b6b8c5cdd57e8bd5b78c23b001", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "AWS Modify ALB Listeners HTTP Redirection", "execution_data": { "last_date_success_run_cell": "2022-09-16T19:37:19.193Z" }, "id": 149, "index": 149, "inputData": [ { "listener_arn": { "constant": false, "value": "\"iter.get(\\\\\"listener_arn\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "listener_arn": { "description": "listener ARNs.", "title": "ListenerArn", "type": "string" }, "region": { "description": "AWS Region of the ALB listeners.", "title": "Region", "type": "string" } }, "required": [ "listener_arn", "region" ], "title": "aws_modify_listener_for_http_redirection", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "listener_arn": "listener_arn", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "arns_list" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Modify ALB Listeners HTTP Redirection", "nouns": [ "listeners", "loadbalancers" ], "orderProperties": [ "listener_arn", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "modified_output", "output_name_enabled": true }, "printOutput": true, "startcondition": "len(arns_list)>0", "tags": [ "aws_modify_listener_for_http_redirection" ], "verbs": [ "modify" ] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", 
"from typing import List, Dict\n", "from pydantic import BaseModel, Field\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_modify_listener_for_http_redirection_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_modify_listener_for_http_redirection(handle, listener_arn: str, region: str) -> List:\n", " \"\"\"aws_modify_listener_for_http_redirection List of Dict with modified listener info.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type listener_arn: string\n", " :param listener_arn: List of LoadBalancerArn.\n", "\n", " :type region: string\n", " :param region: Region to filter ALB listeners.\n", "\n", " :rtype: List of Dict with modified ALB listeners info.\n", " \"\"\"\n", " listner_config = [{\n", " \"Type\": \"redirect\",\n", " \"Order\": 1,\n", " \"RedirectConfig\": {\n", " \"Protocol\": \"HTTPS\",\n", " \"Host\": \"#{host}\",\n", " \"Query\": \"#{query}\",\n", " \"Path\": \"/#{path}\",\n", " \"Port\": \"443\",\n", " \"StatusCode\": \"HTTP_302\"}}]\n", " result = []\n", " try:\n", " #if ALB_Name in listener_arn:\n", " ec2Client = handle.client('elbv2', region_name=region)\n", " response = ec2Client.modify_listener(ListenerArn=listener_arn,\n", " DefaultActions=listner_config)\n", " result.append(response)\n", "\n", " except Exception as error:\n", " result.append(error)\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"listener_arn\": \"iter.get(\\\\\"listener_arn\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"arns_list\",\n", " \"iter_parameter\": [\"listener_arn\",\"region\"]\n", " }''')\n", 
"task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(arns_list)>0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"modified_output\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_modify_listener_for_http_redirection, lego_printer=aws_modify_listener_for_http_redirection_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "ddfe6833-aaf9-42b5-aa00-d759b2921ed0", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "Output variable:
\n", "modified_output
In this Runbook, we demonstrated the use of unSkript's AWS actions. This runbook finds all the Application Load Balancer listeners without HTTP redirection and modifies them to enable HTTP redirection. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Enforce HTTP Redirection across all AWS ALB instances", "parameters": [ "alb_listener_arns", "region" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "alb_listener_arns": { "description": "Listeners ARNs where HTTP redirection needs to be added.", "title": "alb_listener_arns", "type": "array" }, "region": { "description": "AWS region e.g. us-west-2", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "e8899eb02dfbc033aab5733bdae1bd213fa031d40331094008e8673d99ebab63" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Enforce_HTTP_Redirection_across_AWS_ALB.json ================================================ { "name": "Enforce HTTP Redirection across all AWS ALB instances", "description": "This runbook can be used to enforce HTTP redirection across all AWS ALBs. Web encryption protocols like SSL and TLS have been around for nearly three decades. By securing web data in transit, these security measures ensure that third parties can’t simply intercept unencrypted data and cause harm. HTTPS uses the underlying SSL/TLS technology and is the standard way to communicate web data in an encrypted and authenticated manner instead of using insecure HTTP protocol. 
In this runbook, we implement the industry best practice of redirecting all unencrypted HTTP data to the secure HTTPS protocol.", "uuid": "7d87da036fb983f7909a22a01529790dddc5179ebbb8f95517a66314d236555c", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SECOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Ensure_Redshift_Clusters_have_Paused_Resume_Enabled.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "e2fffe48-5eb4-4177-95ec-7955cc381ad8", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "Here we will use unSkript AWS Find Redshift Cluster without Pause Resume Enabled action. This action filters all the redshift clusters from the given region and returns a list of clusters that don't have pause resume enabled.
\n", "\n", "\n", "Input parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "3287a7ff-59c3-41e4-85e6-cc79a6969396", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2", "CATEGORY_TYPE_DB" ], "actionDescription": "Use This Action to AWS find redshift cluster for which paused resume are not Enabled", "actionEntryFunction": "aws_find_redshift_cluster_without_pause_resume_enabled", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Find Redshift Cluster without Pause Resume Enabled", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "433eede3d0f6e49e242c1c0f624617df7212a210e1fd5cde8cec0202d2b972aa", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Use This Action to AWS find redshift cluster for which paused resume are not Enabled", "execution_data": { "last_date_success_run_cell": "2023-05-19T04:42:11.658Z" }, "id": 11, "index": 11, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_find_redshift_cluster_without_pause_resume_enabled", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Find Redshift Cluster without Pause Resume Enabled", "orderProperties": [ "region" ], "outputParams": { "output_name": "clusters", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, 
"startcondition": "not redshift_clusters", "tags": [], "title": "AWS Find Redshift Cluster without Pause Resume Enabled", "uuid": "433eede3d0f6e49e242c1c0f624617df7212a210e1fd5cde8cec0202d2b972aa", "version": "1.0.0" }, "outputs": [], "source": [ "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Tuple\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from unskript.connectors.aws import aws_get_paginator\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_find_redshift_cluster_without_pause_resume_enabled_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_find_redshift_cluster_without_pause_resume_enabled(handle, region: str = \"\") -> Tuple:\n", " \"\"\"aws_find_redshift_cluster_without_pause_resume_enabled Gets all redshift cluster which don't have pause and resume not enabled.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :rtype: Tuple with the status result and a list of all redshift clusters that don't have pause and resume enabled.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " redshift_Client = handle.client('redshift', region_name=reg)\n", " response = aws_get_paginator(redshift_Client, \"describe_clusters\", \"Clusters\")\n", " for cluster in response:\n", " cluster_dict = {}\n", " cluster_name = cluster[\"ClusterIdentifier\"]\n", " schedule_actions = aws_get_paginator(redshift_Client, \"describe_scheduled_actions\", \"ScheduledActions\",Filters=[{'Name': 'cluster-identifier', 'Values': [cluster_name]}])\n", "\n", " if schedule_actions:\n", 
" for actions in schedule_actions:\n", " if \"ResumeCluster\" in actions[\"TargetAction\"].keys() or \"PauseCluster\" in actions[\"TargetAction\"].keys():\n", " pass\n", " else:\n", " cluster_dict[\"cluster_name\"] = cluster_name\n", " cluster_dict[\"region\"] = reg\n", " result.append(cluster_dict)\n", " else:\n", " cluster_dict[\"cluster_name\"] = cluster_name\n", " cluster_dict[\"region\"] = reg\n", " result.append(cluster_dict)\n", " except Exception as error:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not redshift_clusters\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"clusters\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_find_redshift_cluster_without_pause_resume_enabled, lego_printer=aws_find_redshift_cluster_without_pause_resume_enabled_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "0f79562c-f105-49d3-beb0-0b5456c3c805", "metadata": { "name": "Gathering Information", "orderProperties": [], "tags": [], "title": "Gathering Information" }, "source": [ "Output variable:
\n", "clusters
In this action, we use the Run Command via AWS CLI action to get the IAM role ARN used for scheduling pause/resume.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "f8a5cc43-4e9f-4011-bdcd-6cbd0d3a6596", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_CLI" ], "actionDescription": "Execute command using AWS CLI", "actionEntryFunction": "aws_execute_cli_command", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_STR", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Run Command via AWS CLI", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "1db371aff42291641eb6ba83d7acc3fe28e2468d83be1552e8258dc878c0f70d", "continueOnError": false, "credentialsJson": {}, "description": "Execute command using AWS CLI", "execution_data": { "last_date_success_run_cell": "2023-05-19T04:52:30.867Z" }, "id": 1, "index": 1, "inputData": [ { "aws_command": { "constant": false, "value": "\"aws iam get-role --role-name scheduler.redshift.amazonaws.com --query 'Role.Arn' --output text\"" } } ], "inputschema": [ { "properties": { "aws_command": { "description": "AWS Command eg \"aws ec2 describe-instances\"", "title": "AWS Command", "type": "string" } }, "required": [ "aws_command" ], "title": "aws_execute_cli_command", "type": "object" } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Run Command via AWS CLI: Get IAM Role", "orderProperties": [ "aws_command" ], "outputParams": { "output_name": "iam_role_arn", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "tags": [], "title": "Run Command via AWS CLI: Get IAM Role", "uuid": "1db371aff42291641eb6ba83d7acc3fe28e2468d83be1552e8258dc878c0f70d", "version": "1.0.0" }, 
"outputs": [], "source": [ "#\n", "# Copyright (c) 2021 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_execute_cli_command_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_execute_cli_command(handle, aws_command: str) -> str:\n", "\n", " result = handle.aws_cli_command(aws_command)\n", " if result is None or result.returncode != 0:\n", " print(\n", " f\"Error while executing command ({aws_command}): {result}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"aws_command\": \"\\\\\"aws iam get-role --role-name scheduler.redshift.amazonaws.com --query 'Role.Arn' --output text\\\\\"\"\n", " }''')\n", "task.configure(outputName=\"iam_role_arn\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_execute_cli_command, lego_printer=aws_execute_cli_command_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "b3b728cf-a318-4303-8be9-750fd811cdd7", "metadata": { "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "iam_role_arn
In this action, we modify the output from step 1 and return a list of dictionaries for schedule pause resume in the redshift cluster.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 54, "id": "5cbcb4b2-149f-43f7-b723-e2f3766c9980", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-19T04:54:58.091Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Modify Step-1 Output", "orderProperties": [], "tags": [], "title": "Modify Step-1 Output" }, "outputs": [], "source": [ "schedule_cluster_details = []\n", "try:\n", " if clusters[0] == False:\n", " for instance in clusters[1]:\n", " instance[\"iam_role\"] = iam_role_arn\n", " instance[\"pause_schedule_expression\"] = pause_schedule_expression\n", " instance[\"resume_schedule_expression\"] = resume_schedule_expression\n", " schedule_cluster_details.append(instance)\n", "except Exception as e:\n", " for i in redshift_clusters:\n", " instance = {}\n", " instance[\"cluster_name\"] = i\n", " instance[\"region\"] = region\n", " instance[\"iam_role\"] = iam_role_arn\n", " instance[\"pause_schedule_expression\"] = pause_schedule_expression\n", " instance[\"resume_schedule_expression\"] = resume_schedule_expression\n", " schedule_cluster_details.append(instance)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "d1f1a3bf-e7d4-4243-8a99-6e1b66abef29", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable:
\n", "schedule_cluster_details
AWS Schedule Redshift Cluster Pause Resume Enabled
\n", "In this action, we pass all the details collected from step 1 and schedule the pause/resume of the Redshift cluster.
\n", "\n", "\n", "Input parameters:
\n", "iam_role_arn, cluster_name, region, pause_schedule_expression, resume_schedule_expression
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "de3a0d4c-4a05-4546-98ab-f0abea87594a", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2", "CATEGORY_TYPE_DB" ], "actionDescription": "Use This Action to AWS find redshift cluster for which paused resume are not Enabled", "actionEntryFunction": "aws_find_redshift_cluster_without_pause_resume_enabled", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Find Redshift Cluster without Pause Resume Enabled", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": true, "action_uuid": "433eede3d0f6e49e242c1c0f624617df7212a210e1fd5cde8cec0202d2b972aa", "collapsed": true, "continueOnError": true, "credentialsJson": {}, "description": "Use This Action to AWS find redshift cluster for which paused resume are not Enabled", "execution_data": { "last_date_success_run_cell": "2023-05-19T04:30:55.971Z" }, "id": 12, "index": 12, "inputData": [ { "cluster_name": { "constant": false, "value": "\"iter.get(\\\\\"cluster_name\\\\\")\"" }, "iam_role_arn": { "constant": false, "value": "\"iter.get(\\\\\"iam_role\\\\\")\"" }, "pause_schedule_expression": { "constant": false, "value": "\"iter.get(\\\\\"pause_schedule_expression\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" }, "resume_schedule_expression": { "constant": false, "value": "\"iter.get(\\\\\"resume_schedule_expression\\\\\")\"" } } ], "inputschema": [ { "properties": { "cluster_name": { "default": "", "description": "Name of the redshift cluster", "title": "cluster_name", "type": "string" }, "iam_role_arn": { "default": "", "description": "IAM 
role ARN for schedule redshift pause resume", "title": "iam_role_arn", "type": "string" }, "pause_schedule_expression": { "default": "", "description": "The cron expression for the pause schedule.", "title": "pause_schedule_expression", "type": "string" }, "region": { "default": "", "description": "AWS Region.", "title": "Region", "type": "string" }, "resume_schedule_expression": { "default": "", "description": "The cron expression for the resume schedule.", "title": "resume_schedule_expression", "type": "string" } }, "required": [ "region", "iam_role_arn", "cluster_name", "pause_schedule_expression", "resume_schedule_expression" ], "title": "aws_find_redshift_cluster_without_pause_resume_enabled", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "cluster_name": "cluster_name", "iam_role_arn": "iam_role", "pause_schedule_expression": "pause_schedule_expression", "region": "region", "resume_schedule_expression": "resume_schedule_expression" }, "iter_list": { "constant": false, "objectItems": true, "value": "schedule_cluster_details" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Schedule Redshift Cluster Pause Resume Enabled", "orderProperties": [ "region", "iam_role_arn", "cluster_name", "pause_schedule_expression", "resume_schedule_expression" ], "outputParams": { "output_name": "schedule_info", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "tags": [ "aws_find_redshift_cluster_without_pause_resume_enabled" ], "title": "AWS Schedule Redshift Cluster Pause Resume Enabled", "uuid": "433eede3d0f6e49e242c1c0f624617df7212a210e1fd5cde8cec0202d2b972aa", "version": "1.0.0" }, "outputs": [], "source": [ "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, List\n", "import pprint\n", "\n", "\n", "from 
beartype import beartype\n", "@beartype\n", "def aws_schedule_pause_resume_enabled_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_schedule_pause_resume_enabled(handle,\n", " iam_role_arn: str,\n", " cluster_name: str,\n", " region: str,\n", " pause_schedule_expression: str,\n", " resume_schedule_expression: str) -> List:\n", " \"\"\"aws_schedule_pause_resume_enabled schedule pause and resume enabled.\n", "\n", " :type iam_role_arn: str\n", " :param iam_role_arn: The ARN of the IAM role.\n", "\n", " :type cluster_name: str\n", " :param cluster_name: The name of the Redshift cluster.\n", "\n", " :type region: str\n", " :param region: AWS Region.\n", "\n", " :type pause_schedule_expression: str\n", " :param pause_schedule_expression: The cron expression for the pause schedule.\n", "\n", " :type resume_schedule_expression: str\n", " :param resume_schedule_expression: The cron expression for the resume schedule.\n", "\n", " :rtype: List\n", " :return: A list of pause and resume enabled status.\n", " \"\"\"\n", " result = []\n", " pause_action_name = f\"{cluster_name}-scheduled-pause\"\n", " resume_action_name = f\"{cluster_name}-scheduled-resume\"\n", "\n", " try:\n", " redshift_client = handle.client('redshift', region_name=region)\n", " # Schedule pause action\n", " response_pause = redshift_client.create_scheduled_action(\n", " ScheduledActionName=pause_action_name,\n", " TargetAction={\n", " 'PauseCluster': {'ClusterIdentifier': cluster_name}\n", " },\n", " Schedule=pause_schedule_expression,\n", " IamRole=iam_role_arn,\n", " Enable=True\n", " )\n", " result.append(response_pause)\n", " # Schedule resume action\n", " response_resume = redshift_client.create_scheduled_action(\n", " ScheduledActionName=resume_action_name,\n", " TargetAction={\n", " 'ResumeCluster': {'ClusterIdentifier': cluster_name}\n", " },\n", " Schedule=resume_schedule_expression,\n", " IamRole=iam_role_arn,\n", " 
Enable=True\n", " )\n", " result.append(response_resume)\n", "\n", " except Exception as error:\n", " print(error)\n", "\n", " return result\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"iam_role_arn\": \"iter.get(\\\\\"iam_role\\\\\")\",\n", " \"cluster_name\": \"iter.get(\\\\\"cluster_name\\\\\")\",\n", " \"pause_schedule_expression\": \"iter.get(\\\\\"pause_schedule_expression\\\\\")\",\n", " \"resume_schedule_expression\": \"iter.get(\\\\\"resume_schedule_expression\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"schedule_cluster_details\",\n", " \"iter_parameter\": [\"region\",\"iam_role_arn\",\"cluster_name\",\"pause_schedule_expression\",\"resume_schedule_expression\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(schedule_cluster_details) > 0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"schedule_info\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_schedule_pause_resume_enabled, lego_printer=aws_schedule_pause_resume_enabled_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "37022260-01cb-4cb7-9ed1-aeb30ac4ad64", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "Output variable:
\n", "schedule_info
In this Runbook, we demonstrated using unSkript's AWS actions to find Redshift clusters that don't have pause resume enabled and schedule pause resume for those clusters. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "AWS Ensure Redshift Clusters have Paused Resume Enabled", "parameters": null }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "pause_schedule_expression": { "default": "cron(0 0 ? * 7,1 *)", "description": "The cron expression for the pause schedule.e.g. cron(0 0 * * 6-7\n)\nIn these expressions:\n\n0 0 represents 12:00 AM (midnight).\n? is used for the day of the month field.\n* means all possible values for the month field.\n7,1 specifies Saturday (7) and Sunday (1) for the pause schedule.", "title": "pause_schedule_expression", "type": "string" }, "redshift_clusters": { "description": "List of Redshift clusters where pause resume needs to be implemented.", "title": "redshift_clusters", "type": "array" }, "region": { "description": "AWS Region", "title": "region", "type": "string" }, "resume_schedule_expression": { "default": "cron(0 0 ? * 2 *)", "description": "The cron expression for the resume schedule.e.g. cron(0 0 ? * 2 *)\n\n\nIn these expressions:\n\n0 0 represents 12:00 AM (midnight).\n? 
is used for the day of the month field.\n* means all possible values for the month field.\n2 represents Monday for the resume schedule.", "title": "resume_schedule_expression", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "show_action_drag_hint_done": { "environment_id": "1499f27c-6406-4fbd-bd1b-c6f92800018f", "environment_name": "Staging", "execution_id": "", "inputs_for_searched_lego": "", "notebook_id": "d4159cb3-6c83-4ba5-a2f7-d23c0777076b.ipynb", "parameters": null, "runbook_name": "gcp", "search_string": "", "show_tool_tip": true, "tenant_id": "982dba5f-d9df-48ae-a5bf-ec1fc94d4882", "tenant_url": "https://tenant-staging.alpha.unskript.io", "user_email_id": "support+staging@unskript.com", "workflow_id": "f8ead207-81c0-414a-a15b-76fcdefafe8d" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Ensure_Redshift_Clusters_have_Paused_Resume_Enabled.json ================================================ { "name": "AWS Ensure Redshift Clusters have Paused Resume Enabled", "description": "This runbook finds redshift clusters that don't have pause resume enabled and schedules the pause resume for the cluster.", "uuid": "8b9c4eadb5f2fb817be0952f3ecb28c8e490ece6281286a74a95d5fe25019400", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Get_Elb_Unhealthy_Instances.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "c2072425", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "1. Get Unhealthy instances from ELB
2. Post Slack Message\n",
"
Here we will use the unSkript Get Unhealthy instances from ELB action. This action is used to get all unhealthy instances from an ELB; instances which are out of service are considered unhealthy.
\n", "\n", "\n", "Input parameters:
\n", "elb_name,region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "19d75911-e82d-4712-b0ad-d4e5ebb0da1d", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ELB" ], "actionDescription": "Get Unhealthy instances from Elastic Load Balancer", "actionEntryFunction": "aws_get_unhealthy_instances_from_elb", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "94707558cebedbcb77aabaec5d6d2d1bf3f4664db6e9e905d6d905a11a3ef8bc" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Get Unhealthy instances from ELB", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "6d2964252c14fd1439bdefd224d147ac75fc7fe06036c6d0956081fa45505139", "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Get Unhealthy instances from Elastic Load Balancer", "execution_data": { "last_date_success_run_cell": "2023-05-19T11:35:19.835Z" }, "id": 7, "index": 7, "inputData": [ { "elb_name": { "constant": false, "value": "elb_name" }, "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "elb_name": { "default": "", "description": "Name of the elastic load balancer.", "title": "ELB Name", "type": "string" }, "region": { "default": "", "description": "AWS Region of the ELB.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_get_unhealthy_instances_from_elb", "type": "object" } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Get Unhealthy instances from ELB", "orderProperties": [ "elb_name", "region" ], "outputParams": { "output_name": "unhealthy_instances", "output_name_enabled": true, 
"output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not elb_names", "tags": [ "aws_get_unhealthy_instances_from_elb" ], "uuid": "6d2964252c14fd1439bdefd224d147ac75fc7fe06036c6d0956081fa45505139", "version": "1.0.0" }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "import pprint\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_unhealthy_instances_from_elb_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_unhealthy_instances_from_elb(handle, elb_name: str = \"\", region: str = \"\") -> Tuple:\n", " \"\"\"aws_get_unhealthy_instances_from_elb gives unhealthy instances from ELB\n", "\n", " :type elb_name: string\n", " :param elb_name: Name of the elastic load balancer.\n", "\n", " :type region: string\n", " :param region: AWS region.\n", "\n", " :rtype: A tuple with execution results and a list of unhealthy instances from ELB\n", " \"\"\"\n", "\n", " result = []\n", " all_regions = [region]\n", " elb_list = []\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " if not elb_name:\n", " for reg in all_regions:\n", " try:\n", " asg_client = handle.client('elb', region_name=reg)\n", " response = aws_get_paginator(\n", " asg_client,\n", " \"describe_load_balancers\",\n", " \"LoadBalancerDescriptions\"\n", " )\n", " for i in response:\n", " elb_dict = {}\n", " elb_dict[\"load_balancer_name\"] = i[\"LoadBalancerName\"]\n", " elb_dict[\"region\"] = reg\n", " elb_list.append(elb_dict)\n", " except Exception:\n", " pass\n", "\n", " if elb_name and not region:\n", " for reg in all_regions:\n", " 
try:\n", " asg_client = handle.client('elb', region_name=reg)\n", " response = aws_get_paginator(\n", " asg_client,\n", " \"describe_load_balancers\",\n", " \"LoadBalancerDescriptions\"\n", " )\n", " for i in response:\n", " if elb_name in i[\"LoadBalancerName\"]:\n", " elb_dict = {}\n", " elb_dict[\"load_balancer_name\"] = i[\"LoadBalancerName\"]\n", " elb_dict[\"region\"] = reg\n", " elb_list.append(elb_dict)\n", " except Exception:\n", " pass\n", "\n", " if elb_name and region:\n", " try:\n", " elbClient = handle.client('elb', region_name=region)\n", " res = elbClient.describe_instance_health(LoadBalancerName=elb_name)\n", " for instance in res['InstanceStates']:\n", " data_dict = {}\n", " if instance['State'] == \"OutOfService\":\n", " data_dict[\"instance_id\"] = instance[\"InstanceId\"]\n", " data_dict[\"region\"] = reg\n", " data_dict[\"load_balancer_name\"] = i[\"LoadBalancerName\"]\n", " result.append(data_dict)\n", " except Exception:\n", " pass\n", "\n", " for elb in elb_list:\n", " try:\n", " elbClient = handle.client('elb', region_name=elb[\"region\"])\n", " res = elbClient.describe_instance_health(LoadBalancerName=elb[\"load_balancer_name\"])\n", " for instance in res['InstanceStates']:\n", " data_dict = {}\n", " if instance['State'] == \"OutOfService\":\n", " data_dict[\"instance_id\"] = instance[\"InstanceId\"]\n", " data_dict[\"region\"] = reg\n", " data_dict[\"load_balancer_name\"] = i[\"LoadBalancerName\"]\n", " result.append(data_dict)\n", " except Exception:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"elb_name\": \"elb_name\",\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not elb_names\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"unhealthy_instances\")\n", "\n", 
"task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_unhealthy_instances_from_elb, lego_printer=aws_get_unhealthy_instances_from_elb_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "8fc2968d-700c-4264-84ab-9dbbeae25d3c", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "unhealthy_instances
In this action, we modify the output from step 1A and step 1B and return a list of dictionary items for the unhealthy instances from ELB.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 21, "id": "983ce208-f598-4c1e-ab9a-282e90ba5592", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-19T11:35:22.550Z" }, "jupyter": { "source_hidden": true }, "name": "Modify Output", "orderProperties": [], "tags": [], "title": "Modify Output" }, "outputs": [], "source": [ "elb_instance_list = []\n", "try:\n", " if unhealthy_instances:\n", " if unhealthy_instances[0] == False:\n", " for instance in unhealthy_instances[1]:\n", " elb_instance_list.append(instance)\n", "except Exception as e:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "061cdd14", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable: elb_instance_list
\n", "
Here we will use unSkript Post Slack Message action. This action takes channel: str and message: str as input. This input is used to post the message to the slack channel.
\n", "\n", "\n", "Input parameters:
\n", "message,channel
\n", "" ] }, { "cell_type": "code", "execution_count": 14, "id": "80e6665a-2c9a-4a33-89f8-ad221be338ec", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "6a87f83ab0ecfeecb9c98d084e2b1066c26fa64be5b4928d5573a5d60299802d", "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Post Slack Message", "execution_data": { "last_date_success_run_cell": "2023-02-09T09:45:24.587Z" }, "id": 44, "index": 44, "inputData": [ { "channel": { "constant": false, "value": "channel" }, "message": { "constant": false, "value": "f\"Unhealthy instances for elb:{elb_instance_list}\"" } } ], "inputschema": [ { "properties": { "channel": { "description": "Name of the slack channel where the message to be posted", "title": "Channel", "type": "string" }, "message": { "description": "Message to be sent", "title": "Message", "type": "string" } }, "required": [ "channel", "message" ], "title": "slack_post_message", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SLACK", "name": "Post Slack Message", "nouns": [ "slack", "message" ], "orderProperties": [ "channel", "message" ], "output": { "type": "" }, "outputParams": { "output_name": "message_status", "output_name_enabled": true }, "printOutput": true, "startcondition": "channel", "tags": [ "slack_post_message" ], "title": "Post Slack Message", "verbs": [ "post" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "\n", "from pydantic import BaseModel, Field\n", "from slack_sdk import WebClient\n", "from slack_sdk.errors import SlackApiError\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "\n", "from beartype import beartype\n", "def legoPrinter(func):\n", " def Printer(*args, 
**kwargs):\n", " output = func(*args, **kwargs)\n", " if output:\n", " channel = kwargs[\"channel\"]\n", " pp.pprint(print(f\"Message sent to Slack channel {channel}\"))\n", " return output\n", " return Printer\n", "\n", "\n", "@legoPrinter\n", "@beartype\n", "def slack_post_message(\n", " handle: WebClient,\n", " channel: str,\n", " message: str) -> bool:\n", "\n", " try:\n", " response = handle.chat_postMessage(\n", " channel=channel,\n", " text=message)\n", " return True\n", " except SlackApiError as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.response['error']}\")\n", " return False\n", " except Exception as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.__str__()}\")\n", " return False\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"channel\": \"channel\",\n", " \"message\": \"f\\\\\"Unhealthy instances for elb:{elb_instance_list}\\\\\"\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"channel\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"message_status\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.output = task.execute(slack_post_message, hdl=hdl, args=args)\n", " if task.output_name != None:\n", " globals().update({task.output_name: task.output[0]})" ] }, { "attachments": {}, "cell_type": "markdown", "id": "2fbfd774", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "Output variable:
\n", "message_status
In this Runbook, we demonstrated the use of unSkript's AWS and Slack legos. This runbook fetches the unhealthy EC2 instances for a Classic ELB and posts them to a Slack channel. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "AWS Get unhealthy EC2 instances from ELB", "parameters": [ "channel", "elb_name", "region" ] }, "kernelspec": { "display_name": "unSkript (Build: 1169)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "channel": { "default": "", "description": "Slack channel to post to", "title": "channel", "type": "string" }, "elb_name": { "description": "ELB Name", "title": "elb_name", "type": "string" }, "region": { "description": "Region for the ELB instances", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "e8899eb02dfbc033aab5733bdae1bd213fa031d40331094008e8673d99ebab63" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Get_Elb_Unhealthy_Instances.json ================================================ { "name": "AWS Get unhealthy EC2 instances from ELB", "description": "This runbook can be used to list unhealthy EC2 instance from an ELB. Sometimes it difficult to determine why Amazon EC2 Auto Scaling didn't terminate an unhealthy instance from Activity History alone. 
You can find further details about an unhealthy instance's state, and how to terminate that instance, by checking the a few extra things.", "uuid": "94707558cebedbcb77aabaec5d6d2d1bf3f4664db6e9e905d6d905a11a3ef8bc", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Get_Redshift_Daily_Product_Costs.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "e54624c7-4d3e-431a-adda-d2e0e736ed65", "metadata": { "orderProperties": [], "tags": [] }, "source": [ "This RunBook takes data from your AWS Cost and Usage Report, and generates a chart of daily usage for the month for each AWS service.
\n", "It can also be configured to send alerts to slack if a day-over-day change in cost is over the defined threshold.
\n", "Read more in our blog posts:
\n", "https://unskript.com/blog/keeping-your-cloud-costs-in-check-automated-aws-cost-charts-and-alerting/
\n", "https://unskript.com/blog/cloud-costs-charting-daily-ec2-usage-and-cost/
\n", "This RunBook requires a Cost and Usage report in RedShift (here's a link to the AWS docs).
\n", "To Update the Redshift table daily - take a look at the Update Redshift database from S3 RunBook. This will ensure that the data in the Redshift table is up to date.
\n", "" ] }, { "cell_type": "code", "execution_count": 14, "id": "b3644c49-9166-4715-a097-2f27d5c81532", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "1ce9f756a4f1503df353fd5e8df7ea32ebe801a93c607251fea1a5367861da89", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Given a Secret Name - this Action returns the Secret ARN", "execution_data": { "last_date_success_run_cell": "2023-04-07T16:27:42.487Z" }, "id": 189, "index": 189, "inputData": [ { "region": { "constant": false, "value": "region" }, "secret_name": { "constant": false, "value": "secret_name" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "secret_name": { "description": "AWS Secret Name", "title": "secret_name", "type": "string" } }, "required": [ "region", "secret_name" ], "title": "aws_get_secrets_manager_secretARN", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Secrets Manager SecretARN", "nouns": [], "orderProperties": [ "region", "secret_name" ], "output": { "type": "" }, "outputParams": { "output_name": "secretArn", "output_name_enabled": true }, "printOutput": true, "tags": [], "verbs": [] }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "\n", "from __future__ import annotations\n", "\n", "from typing import Optional\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def 
aws_get_secrets_manager_secretARN_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint({\"secret\": output})\n", "\n", "\n", "@beartype\n", "@beartype\n", "@beartype\n", "def aws_get_secrets_manager_secretARN(handle, region: str, secret_name:str) -> str:\n", "\n", "\n", " # Create a Secrets Manager client\n", "\n", " client = handle.client(\n", " service_name='secretsmanager',\n", " region_name=region\n", " )\n", "\n", "\n", " get_secret_value_response = client.get_secret_value(\n", " SecretId=secret_name\n", " )\n", "\n", " #print(get_secret_value_response)\n", " # Decrypts secret using the associated KMS key.\n", " secretArn = get_secret_value_response['ARN']\n", " return secretArn\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"secret_name\": \"secret_name\"\n", " }''')\n", "task.configure(outputName=\"secretArn\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_secrets_manager_secretARN, lego_printer=aws_get_secrets_manager_secretARN_printer, hdl=hdl, args=args)" ] }, { "cell_type": "code", "execution_count": 15, "id": "6db09689-1a22-4cac-81be-cb1e3d6e7ef0", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-07T16:27:47.517Z" }, "jupyter": { "source_hidden": true }, "name": "Create SQL Query", "orderProperties": [], "tags": [], "title": "Create SQL Query" }, "outputs": [], "source": [ "import datetime\n", "\n", "today = datetime.datetime.now()\n", "\n", "yearmonth = today.strftime('%Y%m')\n", "tableName = 'awsbilling'+ yearmonth\n", "todayDay = int(today.strftime('%d'))\n", "yesterDay = 0\n", "if todayDay >1:\n", " yesterDay = todayDay - 1\n", "\n", "sqlQuery = f\"select lineitem_productcode, date_part(day, cast(lineitem_usagestartdate as date)) as day, SUM((lineitem_unblendedcost)::numeric(37,4)) 
as cost from {tableName} group by lineitem_productcode, day order by cost desc;\"\n", "\n", "print(sqlQuery)\n", "\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "id": "99b78f89-b8e0-4aba-86b1-60ad14274207", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_REDSHIFT" ], "actionDescription": "Make a SQL Query to the given AWS Redshift database", "actionEntryFunction": "aws_create_redshift_query", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_STR", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Redshift Query", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "edacb40b6b085473676c85af90fd36de2b23e8fd763ee25c787e8fd629c45773", "continueOnError": false, "credentialsJson": {}, "description": "Make a SQL Query to the given AWS Redshift database", "id": 1, "index": 1, "inputData": [ { "cluster": { "constant": false, "value": "cluster" }, "database": { "constant": false, "value": "database" }, "query": { "constant": false, "value": "sqlQuery" }, "region": { "constant": false, "value": "region" }, "secretArn": { "constant": false, "value": "secretArn" } } ], "inputschema": [ { "properties": { "cluster": { "description": "Name of Redshift Cluster", "title": "cluster", "type": "string" }, "database": { "description": "Name of your Redshift database", "title": "database", "type": "string" }, "query": { "description": "sql query to run", "title": "query", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "secretArn": { "description": "Value of your Secrets Manager ARN", "title": "secretArn", "type": "string" } }, "required": [ "region", "query", 
"cluster", "database", "secretArn" ], "title": "aws_create_redshift_query", "type": "object" } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Redshift Query", "orderProperties": [ "region", "query", "cluster", "database", "secretArn" ], "printOutput": true, "tags": [], "uuid": "edacb40b6b085473676c85af90fd36de2b23e8fd763ee25c787e8fd629c45773", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "\n", "\n", "from __future__ import annotations\n", "from pydantic import BaseModel, Field\n", "from beartype import beartype\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_create_redshift_query(\n", " handle,\n", " region: str,\n", " cluster:str,\n", " database:str,\n", " secretArn: str,\n", " query:str\n", " ) -> str:\n", "\n", " # Input param validation.\n", " #major change\n", " client = handle.client('redshift-data', region_name=region)\n", " # execute the query\n", " response = client.execute_statement(\n", " ClusterIdentifier=cluster,\n", " Database=database,\n", " SecretArn=secretArn,\n", " Sql=query\n", " )\n", " resultId = response['Id']\n", " print(response)\n", " print(\"resultId\",resultId)\n", "\n", "\n", " return resultId\n", "\n", "#make a change\n", "\n", "\n", "def unskript_default_printer(output):\n", " if isinstance(output, (list, tuple)):\n", " for item in output:\n", " print(f'item: {item}')\n", " elif isinstance(output, dict):\n", " for item in output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(output)\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"query\": \"sqlQuery\",\n", " \"cluster\": \"cluster\",\n", " \"database\": \"database\",\n", " \"secretArn\": \"secretArn\"\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = 
task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_create_redshift_query, lego_printer=unskript_default_printer, hdl=hdl, args=args)" ] }, { "cell_type": "code", "execution_count": 17, "id": "b285b379-5226-4896-89db-b5209e19662f", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "26435cb53d995eccf75fd1e0692e611fcdb1b7e09511bbfe365f0e9a5abc416f", "checkEnabled": false, "collapsed": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Given an QueryId, this Action will give you the status of the Query, along with other data like the number of lines/", "execution_data": { "last_date_success_run_cell": "2023-04-07T16:27:52.719Z" }, "id": 204, "index": 204, "inputData": [ { "queryId": { "constant": false, "value": "queryId" }, "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "queryId": { "description": "Id of Redshift Query", "title": "queryId", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [ "region", "queryId" ], "title": "aws_get_redshift_query_details", "type": "object" } ], "isUnskript": false, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Redshift Query Details", "nouns": [], "orderProperties": [ "region", "queryId" ], "output": { "type": "" }, "printOutput": true, "tags": [], "verbs": [] }, "outputs": [], "source": [ "from __future__ import annotations\n", "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import 
BaseModel, Field\n", "from typing import List, Dict\n", "from unskript.connectors.aws import aws_get_paginator\n", "import pprint\n", "from beartype import beartype\n", "\n", "\n", "from typing import Optional\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_redshift_query_details(handle, region: str, queryId:str) -> Dict:\n", "\n", " client = handle.client('redshift-data', region_name=region)\n", " response = client.describe_statement(\n", " Id=queryId\n", " )\n", " resultReady = response['HasResultSet']\n", " queryTimeNs = response['Duration']\n", " ResultRows = response['ResultRows']\n", " details = {\"Status\": response['Status'],\n", " \"resultReady\": resultReady, \n", " \"queryTimeNs\":queryTimeNs,\n", " \"ResultRows\":ResultRows\n", " }\n", "\n", " #return resultReady\n", " return details\n", "\n", "\n", "\n", "\n", "def unskript_default_printer(output):\n", "\n", " pp = pprint.PrettyPrinter(indent=4)\n", " pp.pprint(output)\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"queryId\": \"queryId\",\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_redshift_query_details, lego_printer=unskript_default_printer, hdl=hdl, args=args)" ] }, { "cell_type": "code", "execution_count": 18, "id": "eae5bad1-0dfd-46f8-8efe-10ffe3b9c40d", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "95e51ea5a6230444928042f7932d680fcbc575d053dfa8ed6b60bc7e9b50adcc", "checkEnabled": false, "collapsed": true, 
"continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Given a QueryId, Get the Query Result, and format into a List", "execution_data": { "last_date_success_run_cell": "2023-04-07T16:28:05.326Z" }, "id": 218, "index": 218, "inputData": [ { "region": { "constant": false, "value": "region" }, "resultId": { "constant": false, "value": "queryId" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region", "title": "region", "type": "string" }, "resultId": { "description": "Redshift Query Result", "title": "resultId", "type": "string" } }, "required": [ "resultId", "region" ], "title": "aws_get_redshift_result", "type": "object" } ], "isUnskript": false, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Redshift Result", "nouns": [], "orderProperties": [ "resultId", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "redshiftresult", "output_name_enabled": true }, "printOutput": false, "tags": [], "verbs": [] }, "outputs": [], "source": [ "from __future__ import annotations\n", "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List, Dict\n", "from unskript.connectors.aws import aws_get_paginator\n", "import pprint\n", "from beartype import beartype\n", "import time\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_redshift_result(handle, region:str, resultId: str) -> List:\n", "\n", " time.sleep(10)\n", " client = handle.client('redshift-data', region_name=region)\n", " result = client.get_statement_result(\n", " Id=resultId\n", " )\n", " #result has the Dictionary, but it is not easily queried\n", " #get all the columns into an array\n", " columnNames = []\n", " for column in result['ColumnMetadata']:\n", " columnNames.append(column['label'])\n", " 
#print(columnNames)\n", "\n", " #now let's make the output into a dict\n", " listResult = []\n", " for record in result['Records']:\n", "\n", " for key, value in record[0].items():\n", " rowId = value\n", " entryCounter = 0\n", " entryDict = {}\n", " for entry in record:\n", "\n", " for key, value in entry.items():\n", " entryDict[columnNames[entryCounter]] = value\n", " entryCounter +=1\n", " #print(\"entryDict\",entryDict)\n", " listResult.append(entryDict)\n", "\n", " #print(listResult)\n", " return listResult\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"resultId\": \"queryId\"\n", " }''')\n", "task.configure(outputName=\"redshiftresult\")\n", "\n", "task.configure(printOutput=False)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_redshift_result, lego_printer=unskript_default_printer, hdl=hdl, args=args)" ] }, { "cell_type": "code", "execution_count": 9, "id": "b42d2d45-0a95-4f16-8b44-0cced11ee848", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-07T01:06:24.931Z" }, "jupyter": { "source_hidden": true }, "name": "Build Chart", "orderProperties": [], "tags": [], "title": "Build Chart" }, "outputs": [], "source": [ "import matplotlib as mpl\n", "mpl.use('agg')\n", "from matplotlib.figure import Figure\n", "import panel\n", "import matplotlib.pyplot as plt\n", "import pandas as pd\n", "import pprint\n", "import io, base64, urllib\n", "from PIL import Image\n", "\n", "df = pd.DataFrame.from_dict(redshiftresult)\n", "df['cost']=df['cost'].astype(float)\n", "df['day']=df['day'].astype(int)\n", "\n", "%matplotlib inline\n", "\n", "font = {'size' : 22}\n", "dfpivot = df.pivot(index='day', columns='lineitem_productcode', values='cost')\n", "dfpivot.plot(linewidth=5,ylabel=\"daily cost in $\", figsize=(16, 9) )\n", "\n", "plt.rc('font', **font)\n", 
"plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n", "plt.xticks(fontsize=22)\n", "plt.yticks(fontsize=22)\n", "\n", "plt.show()\n", "\n", "dfpivot.plot(linewidth=5,ylabel=\"daily cost in $\", figsize=(16, 9) )\n", "plt.ylim((0,10))\n", "plt.rc('font', **font)\n", "plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n", "plt.show()\n", "\n", "dfpivot.plot(linewidth=5,ylabel=\"daily cost in $\", figsize=(16, 9) )\n", "plt.xlim((todayDay-7,todayDay))\n", "plt.rc('font', **font)\n", "plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n", "fig = plt.gcf()\n", "plt.show()\n", "\n", "\n", "fig.savefig('awsProducts7Day.jpg')\n", "im = Image.open('awsProducts7Day.jpg')\n", "display(im)\n", "\n" ] }, { "cell_type": "code", "execution_count": 19, "id": "5f476f1b-a7b0-4927-9c7d-6335e9d3e7da", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-07T16:34:22.658Z" }, "jupyter": { "source_hidden": true }, "name": "build alert", "orderProperties": [], "tags": [], "title": "build alert" }, "outputs": [], "source": [ "from datetime import date \n", "\n", "\n", "today = todayDay -1\n", "yesterday =yesterDay -1\n", "\n", "print(today)\n", "bigchange = {}\n", "listChange = []\n", "alert = False\n", "alertText = ''\n", "if yesterday >0:\n", " for instance in dfpivot.columns:\n", " todayCost = dfpivot.at[today, instance]\n", " yesterdayCost = dfpivot.at[yesterday, instance]\n", "\n", " delta =(todayCost-yesterdayCost)/yesterdayCost\n", " if abs(todayCost-yesterdayCost) >1: \n", " if delta >.05:\n", " #print( instance, delta,dfpivot.at[today, instance], dfpivot.at[yesterday, instance])\n", " bigchange[instance] = {\"delta\":delta, \"todayCost\":todayCost,\"yesterdayCost\":yesterdayCost}\n", " listChange.append([instance, yesterdayCost, todayCost])\n", " alertText = '@here There has been a large change in AWS Costs'\n", " alert = True\n", " if date.today().weekday() == 0:\n", " alertText = 'Today is 
Monday, Here is the last week of AWS Costs'\n", " alert = True\n", " print(listChange)\n", " print(\"bigchange\", bigchange)" ] }, { "cell_type": "code", "execution_count": 20, "id": "e0091066-452a-4c06-81fc-3704ee90168c", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": true, "action_uuid": "6a87f83ab0ecfeecb9c98d084e2b1066c26fa64be5b4928d5573a5d60299802d", "checkEnabled": false, "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "customCell": true, "description": "Post Slack Message", "execution_data": { "last_date_success_run_cell": "2023-04-07T16:34:29.971Z" }, "id": 82, "index": 82, "inputData": [ { "channel": { "constant": false, "value": "\"devrel_doug_test1\"" }, "comment": { "constant": false, "value": "alertText" }, "image": { "constant": false, "value": "'awsProducts7Day.jpg'" } } ], "inputschema": [ { "properties": { "channel": { "default": "", "description": "Name of slack channel.", "title": "Channel", "type": "string" }, "comment": { "default": "", "description": "Comment to add with image", "required": false, "title": "comment", "type": "string" }, "image": { "default": "", "description": "image to uplaod", "title": "image", "type": "string" } }, "required": [ "channel", "message" ], "title": "slack_post_image", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SLACK", "name": "Post Slack Image", "nouns": [], "orderProperties": [ "channel", "image", "comment" ], "output": { "type": "" }, "printOutput": true, "startcondition": "alert", "tags": [], "title": "Post Slack Image", "verbs": [] }, "outputs": [], 
"source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "\n", "from pydantic import BaseModel, Field\n", "from slack_sdk import WebClient\n", "from slack_sdk.errors import SlackApiError\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "from beartype import beartype\n", "\n", "from beartype import beartype\n", "@beartype\n", "def slack_post_image_printer(output):\n", " if output is not None:\n", " pprint.pprint(output)\n", " else:\n", " return\n", "\n", "\n", "@beartype\n", "@beartype\n", "def slack_post_image(\n", " handle: WebClient,\n", " channel: str,\n", " comment: str,\n", " image: str) -> str:\n", "\n", " try:\n", " result = handle.files_upload(\n", " channels = channel,\n", " initial_comment=comment,\n", " file=image\n", " )\n", " return f\"Successfuly Sent Message on Channel: #{channel}\"\n", "\n", " except Exception as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.__str__()}\")\n", " return f\"Unable to send message on {channel}\"\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"channel\": \"\\\\\"devrel_doug_test1\\\\\"\",\n", " \"image\": \"'awsProducts7Day.jpg'\",\n", " \"comment\": \"alertText\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"alert\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(slack_post_image, lego_printer=slack_post_image_printer, hdl=hdl, args=args)" ] } ], "metadata": { "execution_data": { "runbook_name": "AWS Redshift Get Daily Costs from AWS Products", "parameters": [ "cluster", "database", "region", "secret_name" ] }, "kernelspec": { "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { 
"name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "cluster": { "description": "The Redshift Cluster to be queried", "title": "cluster", "type": "string" }, "database": { "description": "the Redshift Database in our query", "title": "database", "type": "string" }, "region": { "description": "AWS Region", "title": "region", "type": "string" }, "secret_name": { "description": "AWS Secret Name to retrieve ARN for", "title": "secret_name", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Get_Redshift_Daily_Product_Costs.json ================================================ { "name": "AWS Redshift Get Daily Costs from AWS Products", "description": "This runbook can be used to create charts and alerts around Your AWS product usage. It requires a Cost and USage report to be live in RedShift.", "uuid": "a79201f221993367e23dd9603ed7ef5123324353d717c566f902f7ca6e471f5c", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_CLOUDOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Get_Redshift_EC2_Daily_Costs.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "f46958a9-6580-475a-b845-72aacface2dc", "metadata": { "jupyter": { "source_hidden": false }, "name": "Introduction", "orderProperties": [], "tags": [], "title": "Introduction" }, "source": [ "
This RunBook takes data from your AWS Cost and Usage Report, and generates a chart of daily usage for the month for each AWS service.
\n", "It can also be configured to send alerts to slack if a day-over-day change in cost is over the defined threshold.
\n", "Read more in our blog posts:
\n", "https://unskript.com/blog/keeping-your-cloud-costs-in-check-automated-aws-cost-charts-and-alerting/
\n", "https://unskript.com/blog/cloud-costs-charting-daily-ec2-usage-and-cost/
\n", "This RunBook requires a Cost and Usage report in RedShift (here's a link to the AWS docs).
\n", "To Update the Redshift table daily - take a look at the Update Redshift database from S3 RunBook. This will ensure that the data in the Redshift table is up to date.
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "78914f28-2fd7-477a-8b43-080c736515e8", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_SECRET_MANAGER" ], "actionDescription": "Given a Secret Name - this Action returns the Secret ARN", "actionEntryFunction": "aws_get_secrets_manager_secretARN", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_STR", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Get Secrets Manager SecretARN", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "1ce9f756a4f1503df353fd5e8df7ea32ebe801a93c607251fea1a5367861da89", "collapsed": true, "continueOnError": false, "credentialsJson": {}, "description": "Given a Secret Name - this Action returns the Secret ARN", "id": 4, "index": 4, "inputData": [ { "region": { "constant": false, "value": "region" }, "secret_name": { "constant": false, "value": "secret_name" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "secret_name": { "description": "AWS Secret Name", "title": "secret_name", "type": "string" } }, "required": [ "region", "secret_name" ], "title": "aws_get_secrets_manager_secretARN", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Secrets Manager SecretARN", "orderProperties": [ "region", "secret_name" ], "outputParams": { "output_name": "secretArn", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "tags": [ "aws_get_secrets_manager_secretARN" ], "uuid": 
"1ce9f756a4f1503df353fd5e8df7ea32ebe801a93c607251fea1a5367861da89", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from __future__ import annotations\n", "import pprint\n", "from pydantic import BaseModel, Field\n", "from beartype import beartype\n", "from botocore.exceptions import ClientError\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_secrets_manager_secretARN_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint({\"secret\": output})\n", "\n", "\n", "@beartype\n", "@beartype\n", "def aws_get_secrets_manager_secretARN(handle, region: str, secret_name:str) -> str:\n", " # Create a Secrets Manager client\n", " client = handle.client(\n", " service_name='secretsmanager',\n", " region_name=region\n", " )\n", "\n", " try:\n", " get_secret_value_response = client.get_secret_value(\n", " SecretId=secret_name\n", " )\n", " except ClientError as e:\n", " # For a list of exceptions thrown, see\n", " # https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html\n", " raise e\n", " # Decrypts secret using the associated KMS key.\n", " secretArn = get_secret_value_response['ARN']\n", " return secretArn\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"secret_name\": \"secret_name\"\n", " }''')\n", "task.configure(outputName=\"secretArn\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_secrets_manager_secretARN, lego_printer=aws_get_secrets_manager_secretARN_printer, hdl=hdl, args=args)" ] }, { "cell_type": "code", "execution_count": 4, "id": "6db09689-1a22-4cac-81be-cb1e3d6e7ef0", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-07T16:39:01.145Z" }, "jupyter": { 
"source_hidden": true }, "name": "Create SQL Query", "orderProperties": [], "tags": [], "title": "Create SQL Query" }, "outputs": [], "source": [ "import datetime\n", "\n", "today = datetime.datetime.now()\n", "\n", "yearmonth = today.strftime('%Y%m')\n", "tableName = 'awsbilling'+ yearmonth\n", "todayDay = int(today.strftime('%d'))\n", "yesterDay = 0\n", "if todayDay >1:\n", " yesterDay = todayDay - 1\n", "\n", "sqlQuery = f\"SELECT date_part(day, cast(lineitem_usagestartdate as date)) as day, product_instancetype,SUM(lineitem_usageamount)::numeric(37, 4) AS usage_hours, SUM((lineitem_unblendedcost)::numeric(37,4)) AS usage_cost FROM {tableName} WHERE length(lineitem_usagestartdate)>8 AND product_productfamily = 'Compute Instance' AND pricing_unit IN ('Hours', 'Hrs') GROUP BY day, product_instancetype ORDER BY 1 DESC, 3 DESC, 2 \"\n", "print(sqlQuery)\n", "\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": 5, "id": "57562d3e-4026-4f85-995d-d912318a259a", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "edacb40b6b085473676c85af90fd36de2b23e8fd763ee25c787e8fd629c45773", "checkEnabled": false, "collapsed": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Make a SQL Query to the given AWS Redshift database", "execution_data": { "last_date_success_run_cell": "2023-04-07T16:39:06.945Z" }, "id": 241, "index": 241, "inputData": [ { "cluster": { "constant": false, "value": "cluster" }, "database": { "constant": false, "value": "database" }, "query": { "constant": false, "value": "sqlQuery" }, "region": { "constant": false, "value": "region" }, 
"secretArn": { "constant": false, "value": "secretArn" } } ], "inputschema": [ { "properties": { "cluster": { "description": "Name of Redshift Cluster", "title": "cluster", "type": "string" }, "database": { "description": "Name of your Redshift database", "title": "database", "type": "string" }, "query": { "description": "sql query to run", "title": "query", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "secretArn": { "description": "Value of your Secrets Manager ARN", "title": "secretArn", "type": "string" } }, "required": [ "region", "query", "cluster", "database", "secretArn" ], "title": "aws_create_redshift_query", "type": "object" } ], "isUnskript": false, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Redshift Query", "nouns": [], "orderProperties": [ "region", "query", "cluster", "database", "secretArn" ], "output": { "type": "" }, "outputParams": { "output_name": "queryId", "output_name_enabled": true }, "printOutput": true, "tags": [ "aws_create_redshift_query" ], "verbs": [] }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "\n", "\n", "from __future__ import annotations\n", "from pydantic import BaseModel, Field\n", "from typing import List, Dict\n", "from unskript.connectors.aws import aws_get_paginator\n", "import pprint\n", "from beartype import beartype\n", "@beartype\n", "def aws_create_redshift_query(handle, region: str,cluster:str, database:str, secretArn: str, query:str) -> str:\n", "\n", " # Input param validation.\n", " #major change\n", " client = handle.client('redshift-data', region_name=region)\n", " # define your query\n", " query = query\n", " # execute the query\n", " response = client.execute_statement(\n", " ClusterIdentifier=cluster,\n", " Database=database,\n", " SecretArn=secretArn,\n", " Sql=query\n", " )\n", " resultId = response['Id']\n", " 
print(response)\n", " print(\"resultId\",resultId)\n", "\n", "\n", " return resultId\n", "\n", "#make a change\n", "\n", "\n", "def unskript_default_printer(output):\n", " if isinstance(output, (list, tuple)):\n", " for item in output:\n", " print(f'item: {item}')\n", " elif isinstance(output, dict):\n", " for item in output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(output)\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(inputParamsJson='''{\n", " \"cluster\": \"cluster\",\n", " \"database\": \"database\",\n", " \"query\": \"sqlQuery\",\n", " \"region\": \"region\",\n", " \"secretArn\": \"secretArn\"\n", " }''')\n", "task.configure(outputName=\"queryId\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_create_redshift_query, lego_printer=unskript_default_printer, hdl=hdl, args=args)" ] }, { "cell_type": "code", "execution_count": 6, "id": "b285b379-5226-4896-89db-b5209e19662f", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "26435cb53d995eccf75fd1e0692e611fcdb1b7e09511bbfe365f0e9a5abc416f", "checkEnabled": false, "collapsed": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Given an QueryId, this Action will give you the status of the Query, along with other data like the number of lines/", "execution_data": { "last_date_success_run_cell": "2023-04-07T16:39:12.638Z" }, "id": 204, "index": 204, "inputData": [ { "queryId": { "constant": false, "value": "queryId" }, "region": { "constant": false, 
"value": "region" } } ], "inputschema": [ { "properties": { "queryId": { "description": "Id of Redshift Query", "title": "queryId", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [ "region", "queryId" ], "title": "aws_get_redshift_query_details", "type": "object" } ], "isUnskript": false, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Redshift Query Details", "nouns": [], "orderProperties": [ "region", "queryId" ], "output": { "type": "" }, "printOutput": true, "tags": [ "aws_get_redshift_query_details" ], "verbs": [] }, "outputs": [], "source": [ "from __future__ import annotations\n", "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List, Dict\n", "from unskript.connectors.aws import aws_get_paginator\n", "import pprint\n", "from beartype import beartype\n", "\n", "\n", "from typing import Optional\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_redshift_query_details(handle, region: str, queryId:str) -> Dict:\n", "\n", " client = handle.client('redshift-data', region_name=region)\n", " response = client.describe_statement(\n", " Id=queryId\n", " )\n", " resultReady = response['HasResultSet']\n", " queryTimeNs = response['Duration']\n", " ResultRows = response['ResultRows']\n", " details = {\"Status\": response['Status'],\n", " \"resultReady\": resultReady, \n", " \"queryTimeNs\":queryTimeNs,\n", " \"ResultRows\":ResultRows\n", " }\n", "\n", " #return resultReady\n", " return details\n", "\n", "\n", "\n", "\n", "def unskript_default_printer(output):\n", "\n", " pp = pprint.PrettyPrinter(indent=4)\n", " pp.pprint(output)\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(inputParamsJson='''{\n", " \"queryId\": \"queryId\",\n", " \"region\": 
\"region\"\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_redshift_query_details, lego_printer=unskript_default_printer, hdl=hdl, args=args)" ] }, { "cell_type": "code", "execution_count": 7, "id": "eae5bad1-0dfd-46f8-8efe-10ffe3b9c40d", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "95e51ea5a6230444928042f7932d680fcbc575d053dfa8ed6b60bc7e9b50adcc", "checkEnabled": false, "collapsed": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Given a QueryId, Get the Query Result, and format into a List", "execution_data": { "last_date_success_run_cell": "2023-04-07T16:39:29.257Z" }, "id": 218, "index": 218, "inputData": [ { "region": { "constant": false, "value": "region" }, "resultId": { "constant": false, "value": "queryId" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region", "title": "region", "type": "string" }, "resultId": { "description": "Redshift Query Result", "title": "resultId", "type": "string" } }, "required": [ "resultId", "region" ], "title": "aws_get_redshift_result", "type": "object" } ], "isUnskript": false, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Redshift Result", "nouns": [], "orderProperties": [ "resultId", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "redshiftresult", "output_name_enabled": true }, "printOutput": false, "tags": [ "aws_get_redshift_result" ], "verbs": [] }, "outputs": [], "source": [ "from __future__ 
import annotations\n", "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List, Dict\n", "from unskript.connectors.aws import aws_get_paginator\n", "import pprint\n", "from beartype import beartype\n", "import time\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_redshift_result(handle, region:str, resultId: str) -> List:\n", "\n", " time.sleep(10)\n", " client = handle.client('redshift-data', region_name=region)\n", " result = client.get_statement_result(\n", " Id=resultId\n", " )\n", " #result has the Dictionary, but it is not easily queried\n", " #get all the columns into an array\n", " columnNames = []\n", " for column in result['ColumnMetadata']:\n", " columnNames.append(column['label'])\n", " #print(columnNames)\n", "\n", " #now let's make the output into a dict\n", " listResult = []\n", " for record in result['Records']:\n", "\n", " for key, value in record[0].items():\n", " rowId = value\n", " entryCounter = 0\n", " entryDict = {}\n", " for entry in record:\n", "\n", " for key, value in entry.items():\n", " entryDict[columnNames[entryCounter]] = value\n", " entryCounter +=1\n", " #print(\"entryDict\",entryDict)\n", " listResult.append(entryDict)\n", "\n", " #print(listResult)\n", " return listResult\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"resultId\": \"queryId\"\n", " }''')\n", "task.configure(outputName=\"redshiftresult\")\n", "\n", "task.configure(printOutput=False)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_redshift_result, lego_printer=unskript_default_printer, hdl=hdl, args=args)" ] }, { "cell_type": "code", "execution_count": 8, "id": "b42d2d45-0a95-4f16-8b44-0cced11ee848", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { 
"last_date_success_run_cell": "2023-04-07T16:39:32.068Z" }, "jupyter": { "source_hidden": true }, "name": "Build Chart", "orderProperties": [], "tags": [], "title": "Build Chart" }, "outputs": [], "source": [ "import matplotlib as mpl\n", "mpl.use('agg')\n", "from matplotlib.figure import Figure\n", "import panel\n", "import matplotlib.pyplot as plt\n", "import pandas as pd\n", "import pprint\n", "import io, base64, urllib\n", "from PIL import Image\n", "\n", "df = pd.DataFrame.from_dict(redshiftresult)\n", "df['day']=df['day'].astype(int)\n", "df['usage_hours']=df['usage_hours'].astype(float)\n", "df['usage_cost']=df['usage_cost'].astype(float)\n", "\n", "%matplotlib inline\n", "\n", "font = {'size' : 22}\n", "\n", "\n", "font = {'size' : 16}\n", "plt.rc('font', **font)\n", "dfpivot = df.pivot(index='day', columns='product_instancetype', values='usage_cost')\n", "dfpivot.plot(linewidth=5, ylabel=\"daily cost in $\", figsize=(16, 9), )\n", "plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n", "plt.show()\n", "\n", "dfpivot = df.pivot(index='day', columns='product_instancetype', values='usage_cost')\n", "dfpivot.plot(linewidth=5, ylabel=\"daily cost in $\", figsize=(16, 9), )\n", "plt.ylim((0,10))\n", "plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n", "plt.show()\n", "\n", "dfpivot = df.pivot(index='day', columns='product_instancetype', values='usage_cost')\n", "dfpivot.plot(linewidth=5,ylabel=\"daily cost in $\", figsize=(16, 9) )\n", "plt.xlim((todayDay-7,todayDay))\n", "plt.rc('font', **font)\n", "plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n", "fig = plt.gcf()\n", "plt.show()\n", "\n", "\n", "fig.savefig('awsProducts7Day.jpg')\n", "im = Image.open('awsProducts7Day.jpg')\n", "display(im)\n", "\n" ] }, { "cell_type": "code", "execution_count": 9, "id": "5f476f1b-a7b0-4927-9c7d-6335e9d3e7da", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-07T16:39:35.956Z" }, 
"jupyter": { "source_hidden": true }, "name": "build alert", "orderProperties": [], "tags": [], "title": "build alert" }, "outputs": [], "source": [ "from datetime import date \n", "\n", "\n", "today = todayDay -1\n", "yesterday =yesterDay -1\n", "\n", "print(today)\n", "bigchange = {}\n", "listChange = []\n", "alert = False\n", "alertText = ''\n", "if yesterday >0:\n", " for instance in dfpivot.columns:\n", " todayCost = dfpivot.at[today, instance]\n", " yesterdayCost = dfpivot.at[yesterday, instance]\n", "\n", " delta =(todayCost-yesterdayCost)/yesterdayCost\n", " if abs(todayCost-yesterdayCost) >1: \n", " if delta >.05:\n", " #print( instance, delta,dfpivot.at[today, instance], dfpivot.at[yesterday, instance])\n", " bigchange[instance] = {\"delta\":delta, \"todayCost\":todayCost,\"yesterdayCost\":yesterdayCost}\n", " listChange.append([instance, yesterdayCost, todayCost])\n", " alertText = '@here There has been a large change in EC2 Costs'\n", " alert = True\n", " if date.today().weekday() == 0:\n", " alertText = 'Today is Monday, Here is the last week of EC2 Costs'\n", " alert = True\n", " print(date.today().weekday())\n", " print(\"bigchange\", bigchange)" ] }, { "cell_type": "code", "execution_count": 11, "id": "e0091066-452a-4c06-81fc-3704ee90168c", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": true, "action_uuid": "6a87f83ab0ecfeecb9c98d084e2b1066c26fa64be5b4928d5573a5d60299802d", "checkEnabled": false, "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "customCell": true, "description": "Post Slack Message", "execution_data": { "last_date_success_run_cell": 
"2023-04-07T16:39:47.279Z" }, "id": 82, "index": 82, "inputData": [ { "channel": { "constant": false, "value": "\"devrel_doug_test1\"" }, "comment": { "constant": false, "value": "alertText" }, "image": { "constant": false, "value": "'awsProducts7Day.jpg'" } } ], "inputschema": [ { "properties": { "channel": { "default": "", "description": "Name of slack channel.", "title": "Channel", "type": "string" }, "comment": { "default": "", "description": "Comment to add with image", "required": false, "title": "comment", "type": "string" }, "image": { "default": "", "description": "image to uplaod", "title": "image", "type": "string" } }, "required": [ "channel", "message" ], "title": "slack_post_image", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SLACK", "name": "Post Slack Image", "nouns": [], "orderProperties": [ "channel", "image", "comment" ], "output": { "type": "" }, "printOutput": true, "startcondition": "alert", "tags": [ "slack_post_message" ], "title": "Post Slack Image", "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "\n", "from pydantic import BaseModel, Field\n", "from slack_sdk import WebClient\n", "from slack_sdk.errors import SlackApiError\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "from beartype import beartype\n", "\n", "from beartype import beartype\n", "@beartype\n", "def slack_post_image_printer(output):\n", " if output is not None:\n", " pprint.pprint(output)\n", " else:\n", " return\n", "\n", "\n", "@beartype\n", "@beartype\n", "def slack_post_image(\n", " handle: WebClient,\n", " channel: str,\n", " comment: str,\n", " image: str) -> str:\n", "\n", " try:\n", " result = handle.files_upload(\n", " channels = channel,\n", " initial_comment=comment,\n", " file=image\n", " )\n", " return f\"Successfuly Sent Message on Channel: #{channel}\"\n", "\n", " except Exception as e:\n", 
" print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.__str__()}\")\n", " return f\"Unable to send message on {channel}\"\n", "\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(inputParamsJson='''{\n", " \"channel\": \"\\\\\"devrel_doug_test1\\\\\"\",\n", " \"image\": \"'awsProducts7Day.jpg'\",\n", " \"comment\": \"alertText\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"alert\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(slack_post_image, lego_printer=slack_post_image_printer, hdl=hdl, args=args)" ] } ], "metadata": { "execution_data": { "runbook_name": "AWS Redshift Get Daily Costs from EC2 Usage", "parameters": [ "cluster", "database", "region", "secret_name" ] }, "kernelspec": { "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "cluster": { "description": "The Redshift Cluster to be queried", "title": "cluster", "type": "string" }, "database": { "description": "the Redshift Database in our query", "title": "database", "type": "string" }, "region": { "description": "AWS Region", "title": "region", "type": "string" }, "secret_name": { "description": "AWS Secret Name to retrieve ARN for", "title": "secret_name", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: 
AWS/AWS_Get_Redshift_EC2_Daily_Costs.json ================================================ { "name": "AWS Redshift Get Daily Costs from EC2 Usage", "description": "This runbook can be used to create charts and alerts around AWS EC2 usage. It requires a Cost and Usage report to be live in Redshift.", "uuid": "a79201f221993867e23dd9603ed7ef5123324353d717c566f902f7ca6e471f5c", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_CLOUDOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Lowering_AWS_CloudTrail_Costs_by_Removing_Redundant_Trails.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "1a1d80a5-6559-47b4-954f-8c301c581d8f", "metadata": { "jupyter": { "source_hidden": false }, "name": "Finding Redundant Trails in AWS", "orderProperties": [], "tags": [], "title": "Finding Redundant Trails in AWS" }, "source": [ "
1. Finding Redundant Trails in AWS
" ] }, { "attachments": {}, "cell_type": "markdown", "id": "4465838e-f101-4ff9-ae4a-875f3816bbfb", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1", "orderProperties": [], "tags": [], "title": "Step-1" }, "source": [ "Here we will use unSkript Finding Redundant Trails in AWS action. The AWS CloudTrail service allows developers to enable policies managing compliance, governance, and auditing of their AWS accounts. In addition, AWS CloudTrail offers logging, monitoring, and storage of any activity around actions related to your AWS structures. The service activates from the moment you set up your AWS account, and while it provides real-time activity visibility, it also means higher AWS costs. This action is used to find Redundant Trails in AWS.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "e41b2aa2-2313-4fbe-a320-745afa0983ae", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_CLOUDTRAIL" ], "actionDescription": "This action will find a redundant cloud trail if the attribute IncludeGlobalServiceEvents is true, and then we need to find multiple duplications.", "actionEntryFunction": "aws_finding_redundant_trails", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "c4d55f5dd5bb964460f4ad7335daa8bb094792b0d64149dbddca019513f05598" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Finding Redundant Trails in AWS", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "964f2a773fdbd64ec9e9f7e846943824d46fef497b574a088766c63811e61581", "collapsed": true, "credentialsJson": {}, "description": "This action will find a redundant cloud trail if the attribute IncludeGlobalServiceEvents is true, and then we need to find multiple duplications.", "id": 1, "index": 1, "inputschema": [ { "properties": {}, "title": "aws_finding_redundant_trails", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Finding Redundant Trails in AWS", "orderProperties": [], "outputParams": { "output_name": "redundant_trails", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "tags": [], "uuid": "964f2a773fdbd64ec9e9f7e846943824d46fef497b574a088766c63811e61581", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic 
import BaseModel, Field\n", "from typing import Tuple\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_finding_redundant_trails_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_finding_redundant_trails(handle) -> Tuple:\n", " \"\"\"aws_finding_redundant_trails Returns an array of redundant trails in AWS\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method.\n", "\n", " :rtype: Tuple with check status and list of redundant trails\n", " \"\"\"\n", " result = []\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " ec2Client = handle.client('cloudtrail', region_name=reg)\n", " response = ec2Client.describe_trails()\n", " for glob_service in response[\"trailList\"]:\n", " if glob_service[\"IncludeGlobalServiceEvents\"] is True:\n", " for i in result:\n", " if i[\"trail_name\"] == glob_service[\"Name\"]:\n", " i[\"regions\"].append(reg)\n", " else:\n", " if not any(i[\"trail_name\"] == glob_service[\"Name\"] for i in result):\n", " trail_dict = {}\n", " trail_dict[\"trail_name\"] = glob_service[\"Name\"]\n", " trail_dict[\"regions\"] = [reg]\n", " result.append(trail_dict)\n", " except Exception as e:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(outputName=\"redundant_trails\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_finding_redundant_trails, lego_printer=aws_finding_redundant_trails_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "e49d7606-52ce-4a2b-bc06-22e5470d1aeb", "metadata": { "jupyter": { 
"source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output parameters:
\n", "redundant_trails
In this action, we select the commands to run based on the parameters given by the user, as follows:
\n", "stop_multiregion_trail = true To turn off multi-region tracking of cloud trail.\n",
"aws cloudtrail update-trail --region us-west-2 --name cc-test-trail --no-is-multi-region-trailglobal_event_tractiong = true To turn off the global service events tracking the issue of the cloud trail.\n",
"aws cloudtrail update-trail --region us-west-2 --name cc-test-trail --no-include-global-service-eventsstop_multiregion_trail = true and global_event_tractiong = true we use both commands to update the redundant trails.Output parameters: command_list
In this action, we execute the commands from the above actions to update the redundant cloud trails.
\n", " Input parameters: aws_command
Output parameters: updated_output
" ] }, { "cell_type": "code", "execution_count": 42, "id": "a991f490-7bc2-43a5-80ec-cab51729d591", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "1db371aff42291641eb6ba83d7acc3fe28e2468d83be1552e8258dc878c0f70d", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Execute command using AWS CLI", "execution_data": { "last_date_success_run_cell": "2023-02-21T10:49:24.829Z" }, "id": 199, "index": 199, "inputData": [ { "aws_command": { "constant": false, "value": "iter_item" } } ], "inputschema": [ { "properties": { "aws_command": { "description": "AWS Command eg \"aws ec2 describe-instances\"", "title": "AWS Command", "type": "string" } }, "required": [ "aws_command" ], "title": "aws_execute_cli_command", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "aws_command", "iter_list": { "constant": false, "objectItems": false, "value": "command_list" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Run Command via AWS CLI: Update Redundant Cloud Trails", "nouns": [], "orderProperties": [ "aws_command" ], "output": { "type": "" }, "outputParams": { "output_name": "updated_output", "output_name_enabled": true }, "printOutput": true, "startcondition": "len(command_list)>0", "tags": [ "aws_execute_cli_command" ], "title": "Run Command via AWS CLI: Update Redundant Cloud Trails", "verbs": [] }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2021 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_execute_cli_command_printer(output):\n", " if output is None:\n", " return\n", " 
print(output)\n", "\n", "\n", "@beartype\n", "def aws_execute_cli_command(handle, aws_command: str) -> str:\n", "\n", " result = handle.aws_cli_command(aws_command)\n", " if result is None or result.returncode != 0:\n", " print(\n", " f\"Error while executing command ({aws_command}): {result}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"aws_command\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"command_list\",\n", " \"iter_parameter\": \"aws_command\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(command_list)>0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"updated_output\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_execute_cli_command, lego_printer=aws_execute_cli_command_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "c4e37a77-7c92-43ab-80de-bb98d15d0a3a", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "
This Runbook demonstrates the use of unSkript's AWS actions to find redundant trails in AWS and update the cloud trails. To view the full platform capabilities of unSkript, please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "AWS Lowering CloudTrail Costs by Removing Redundant Trails", "parameters": null }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "global_event_tractiong": { "default": false, "description": "To turn off the global service events tracking the issue of the cloud trail.", "title": "global_event_tractiong", "type": "boolean" }, "region": { "description": "To update the cloud trail multi-region tracking to a single region.", "title": "region", "type": "string" }, "stop_multiregion_trail": { "default": false, "description": "To turn off multi-region tracking of cloud trail.", "title": "stop_multiregion_trail", "type": "boolean" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "e8899eb02dfbc033aab5733bdae1bd213fa031d40331094008e8673d99ebab63" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Lowering_AWS_CloudTrail_Costs_by_Removing_Redundant_Trails.json ================================================ { "name": "AWS Lowering CloudTrail Costs by Removing Redundant Trails", "description": "The AWS CloudTrail service allows developers to enable policies managing compliance, governance, and auditing of their AWS account. In addition, AWS CloudTrail offers logging, monitoring, and storage of any activity around actions related to your AWS structures. The service activates from the moment you set up your AWS account and while it provides real-time activity visibility, it also means higher AWS costs. 
Here Finding Redundant Trails in AWS", "uuid": "c4d55f5dd5bb964460f4ad7335daa8bb094792b0d64149dbddca019513f05598", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_COST_OPT" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Notify_About_Unused_Keypairs.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "5360a41f-ee95-482d-8523-4c5f608eca12", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Find unused Keypairs
2) Send message to Slack
Using unSkript's Filter AWS Unused Keypairs action, we will fetch all the available keypairs and compare them to the ones that are used by the AWS instances. If a match is not found, the keypair is deduced to be unused.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "5a871fd8-ba3a-4eb3-97f2-a083aac7e925", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2" ], "actionDescription": "Filter AWS Unused Keypairs", "actionEntryFunction": "aws_filter_unused_keypairs", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "a28edafac5f3bac3ca34d677d9b01a4bc6f74893e50bc103e5cefb00e0f48746" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Filter AWS Unused Keypairs", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "adb9d5bea27bf94e9537edccd8683accde12b7afa786ce6e8d89b34079846a44", "continueOnError": false, "credentialsJson": {}, "description": "Filter AWS Unused Keypairs", "id": 4, "index": 4, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "Name of the AWS Region", "title": "Region", "type": "string" } }, "required": [], "title": "aws_filter_unused_keypairs", "type": "object" } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Filter AWS Unused Keypairs", "orderProperties": [ "region" ], "outputParams": { "output_name": "unused_keypairs", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "tags": [ "aws_filter_unused_keypairs" ], "uuid": "adb9d5bea27bf94e9537edccd8683accde12b7afa786ce6e8d89b34079846a44", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from 
typing import List, Tuple,Optional\n", "from unskript.legos.utils import CheckOutput, CheckOutputStatus\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from unskript.connectors.aws import aws_get_paginator\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_unused_keypairs_printer(output):\n", " if output is None:\n", " return\n", " if isinstance(output, CheckOutput):\n", " print(output.json())\n", " else:\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_filter_unused_keypairs(handle, region: str = None) -> CheckOutput:\n", " \"\"\"aws_filter_unused_keypairs Returns an array of KeyPair.\n", "\n", " :type region: object\n", " :param region: Object containing global params for the notebook.\n", "\n", " :rtype: Object with status, result of unused key pairs, and error.\n", " \"\"\"\n", " all_keys_dict = {}\n", " used_keys_dict = {}\n", " key_pairs_all = []\n", " used_key_pairs = []\n", " result = []\n", " all_regions = [region]\n", " if region is None or len(region)==0:\n", " all_regions = aws_list_all_regions(handle=handle)\n", " for r in all_regions:\n", " try:\n", " ec2Client = handle.client('ec2', region_name=r)\n", " key_pairs_all = list(map(lambda i: i['KeyName'], ec2Client.describe_key_pairs()['KeyPairs']))\n", " res = aws_get_paginator(ec2Client, \"describe_instances\", \"Reservations\")\n", " for reservation in res:\n", " for keypair in reservation['Instances']:\n", " if 'KeyName'in keypair and keypair['KeyName'] not in used_key_pairs:\n", " used_key_pairs.append(keypair['KeyName'])\n", " used_keys_dict[\"region\"]=r\n", " used_keys_dict[\"key_name\"]=used_key_pairs\n", " all_keys_dict[\"region\"]=r\n", " all_keys_dict[\"key_name\"]=key_pairs_all\n", " final_dict = {}\n", " final_list=[]\n", " for k,v in all_keys_dict.items():\n", " if v!=[]:\n", " if k==\"key_name\":\n", " for each in v:\n", " if each not in used_keys_dict[\"key_name\"]:\n", 
" final_list.append(each)\n", " if len(final_list)!=0:\n", " final_dict[\"region\"]=r\n", " final_dict[\"unused_keys\"]=final_list\n", " if len(final_dict)!=0:\n", " result.append(final_dict)\n", " except Exception as e:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "\n", "task.configure(outputName=\"unused_keypairs\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_unused_keypairs, lego_printer=aws_filter_unused_keypairs_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "732807f2-94cc-4741-b14e-92bbf46b4724", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "unused_key_pairs
This action filters regions that have no unused keypairs and creates a list of those that have them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "379c28b0-407d-4d04-9319-d57bb5ee48e6", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-02T16:26:29.300Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Unused Keypairs", "orderProperties": [], "tags": [], "title": "Create List of Unused Keypairs" }, "outputs": [], "source": [ "all_unused_key_pairs = []\n", "if unused_keypairs[0] == False:\n", " if len(unused_keypairs[1])!=0:\n", " all_unused_key_pairs=unused_keypairs[1]\n", "print(all_unused_key_pairs)" ] }, { "cell_type": "markdown", "id": "bdb9d8ef-d374-4225-9f60-a72acab538d3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_unused_key_pairs
This action sends a message containing the region and unused keypairs list to the given channel.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "a4e3e317-bb03-4378-9ef0-7fe61fd6f6a8", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "6a87f83ab0ecfeecb9c98d084e2b1066c26fa64be5b4928d5573a5d60299802d", "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Post Slack Message", "id": 78, "index": 78, "inputData": [ { "channel": { "constant": false, "value": "channel_name" }, "message": { "constant": false, "value": "\"Unused Keypairs- {}\".format(all_unused_key_pairs)" } } ], "inputschema": [ { "properties": { "channel": { "description": "Name of slack channel.", "title": "Channel", "type": "string" }, "message": { "description": "Message for slack channel.", "title": "Message", "type": "string" } }, "required": [ "channel", "message" ], "title": "slack_post_message", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SLACK", "name": "Post Slack Message", "nouns": [], "orderProperties": [ "channel", "message" ], "output": { "type": "" }, "printOutput": true, "startcondition": "len(channel_name)!=0", "tags": [ "slack_post_message" ], "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "\n", "from pydantic import BaseModel, Field\n", "from slack_sdk import WebClient\n", "from slack_sdk.errors import SlackApiError\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "from beartype import beartype\n", "\n", "from beartype import beartype\n", "@beartype\n", "def slack_post_message_printer(output):\n", " if output is not None:\n", " pprint.pprint(output)\n", " else:\n", " return\n", "\n", "\n", "@beartype\n", 
"@beartype\n", "def slack_post_message(\n", " handle: WebClient,\n", " channel: str,\n", " message: str) -> str:\n", "\n", " try:\n", " response = handle.chat_postMessage(\n", " channel=channel,\n", " text=message)\n", " return f\"Successfuly Sent Message on Channel: #{channel}\"\n", " except SlackApiError as e:\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.response['error']}\")\n", " if e.response['error'] == 'channel_not_found':\n", " raise Exception('Channel Not Found')\n", " elif e.response['error'] == 'duplicate_channel_not_found':\n", " raise Exception('Channel associated with the message_id not valid')\n", " elif e.response['error'] == 'not_in_channel':\n", " raise Exception('Cannot post message to channel user is not in')\n", " elif e.response['error'] == 'is_archived':\n", " raise Exception('Channel has been archived')\n", " elif e.response['error'] == 'msg_too_long':\n", " raise Exception('Message text is too long')\n", " elif e.response['error'] == 'no_text':\n", " raise Exception('Message text was not provided')\n", " elif e.response['error'] == 'restricted_action':\n", " raise Exception('Workspace preference prevents user from posting')\n", " elif e.response['error'] == 'restricted_action_read_only_channel':\n", " raise Exception('Cannot Post message, read-only channel')\n", " elif e.response['error'] == 'team_access_not_granted':\n", " raise Exception('The token used is not granted access to the workspace')\n", " elif e.response['error'] == 'not_authed':\n", " raise Exception('No Authtnecition token provided')\n", " elif e.response['error'] == 'invalid_auth':\n", " raise Exception('Some aspect of Authentication cannot be validated. 
Request denied')\n", " elif e.response['error'] == 'access_denied':\n", " raise Exception('Access to a resource specified in the request denied')\n", " elif e.response['error'] == 'account_inactive':\n", " raise Exception('Authentication token is for a deleted user')\n", " elif e.response['error'] == 'token_revoked':\n", " raise Exception('Authentication token for a deleted user has been revoked')\n", " elif e.response['error'] == 'no_permission':\n", " raise Exception('The workspace toekn used does not have necessary permission to send message')\n", " elif e.response['error'] == 'ratelimited':\n", " raise Exception('The request has been ratelimited. Retry sending message later')\n", " elif e.response['error'] == 'service_unavailable':\n", " raise Exception('The service is temporarily unavailable')\n", " elif e.response['error'] == 'fatal_error':\n", " raise Exception('The server encountered catostrophic error while sending message')\n", " elif e.response['error'] == 'internal_error':\n", " raise Exception('The server could not complete operation, likely due to transietn issue')\n", " elif e.response['error'] == 'request_timeout':\n", " raise Exception('Sending message error via POST: either message was missing or truncated')\n", " else:\n", " raise Exception(f'Failed Sending Message to slack channel {channel} Error: {e.response[\"error\"]}')\n", "\n", " except Exception as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.__str__()}\")\n", " return f\"Unable to send message on {channel}\"\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"channel\": \"channel_name\",\n", " \"message\": \"\\\\\"Unused Keypairs- {}\\\\\".format(all_unused_key_pairs)\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(channel_name)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", 
"task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(slack_post_message, lego_printer=slack_post_message_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "4cb76d21-9731-4e77-ad80-8ac4033c79b3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "None
In this Runbook, we were able to filter unused keypairs and send that list via a Slack message to the given channel. To view the full platform capabilities of unSkript, please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "List unused Amazon EC2 key pairs", "parameters": [ "channel_name", "region" ] }, "kernelspec": { "display_name": "unSkript (Build: 1169)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "channel": { "description": "Slack channel to send the notification. Eg: dummy, general", "title": "channel", "type": "string" }, "region": { "description": "AWS Region to search for unused keys. Eg: \"us-west-2\". If left blank, all regions will be considered.", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Notify_About_Unused_Keypairs.json ================================================ { "name": "List unused Amazon EC2 key pairs", "description": "This runbook finds all EC2 key pairs that are not used by an EC2 instance and notifies a slack channel about them. 
Optionally it can delete the key pairs based on user configuration.", "uuid": "a28edafac5f3bac3ca34d677d9b01a4bc6f74893e50bc103e5cefb00e0f48746", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Purchase_Reserved_Cache_Nodes_For_Long_Running_ElastiCache_Clusters.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "5424264e-6195-4cf9-906b-24b02d5a83f3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Find Long Running AWS ElastiCache Clusters without Reserved Cache Nodes
2) Purchase Reserved Cache Nodes
Using unSkript's Find Long Running AWS ElastiCache Clusters without Reserved Nodes action, we will find clusters that have been running for longer than a specified threshold and do not have reserved cache nodes purchased for them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region, threshold
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "acc43420-0189-440d-9bac-a431b014d69c", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ELASTICACHE" ], "actionDescription": "This action gets information about long running ElastiCache clusters and their status, and checks if they have any reserved nodes associated with them.", "actionEntryFunction": "aws_get_long_running_elasticcache_clusters_without_reserved_nodes", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Get Long Running ElastiCache clusters Without Reserved Nodes", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "48dd783f3952172c7cf417df55341c1abd4458ad085181ad9367b677b646e86f", "collapsed": true, "continueOnError": false, "credentialsJson": {}, "description": "This action gets information about long running ElastiCache clusters and their status, and checks if they have any reserved nodes associated with them.", "execution_data": { "last_date_success_run_cell": "2023-05-12T15:59:38.022Z" }, "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" }, "threshold": { "constant": false, "value": "threshold_int" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region to get the ElasticCache Cluster", "title": "AWS Region", "type": "string" }, "threshold": { "default": 10, "description": "Threshold(in days) to find long running ElasticCache clusters. 
Eg: 30, This will find all the clusters that have been created a month ago.", "title": "Threshold(in days)", "type": "number" } }, "required": [], "title": "aws_get_long_running_elasticcache_clusters_without_reserved_nodes", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Long Running ElastiCache clusters Without Reserved Nodes", "orderProperties": [ "region", "threshold" ], "outputParams": { "output_name": "clusters_without_reserved_nodes", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "tags": [ "aws_get_long_running_elasticcache_clusters_without_reserved_nodes" ], "uuid": "48dd783f3952172c7cf417df55341c1abd4458ad085181ad9367b677b646e86f", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Tuple\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pprint\n", "from datetime import datetime,timedelta, timezone\n", "\n", "\n", "\n", "from typing import Optional\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_long_running_elasticcache_clusters_without_reserved_nodes_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_long_running_elasticcache_clusters_without_reserved_nodes(handle, region: str = \"\", threshold:int = 10) -> Tuple:\n", " \"\"\"aws_get_long_running_elasticcache_clusters_without_reserved_nodes finds ElasticCache Clusters that are long running and have no reserved nodes\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: Region of 
the Cluster.\n", "\n", " :type threshold: integer\n", " :param threshold: Threshold(in days) to find long running ElasticCache clusters. Eg: 30, This will find all the clusters that have been created a month ago.\n", "\n", " :rtype: status, list of clusters, nodetype and their region.\n", " \"\"\"\n", " result = []\n", " reservedNodesPerRegion = {}\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " # Get the list of reserved node per region per type. We just need to maintain\n", " # what type of reserved nodes are present per region. So, reservedNodesPerRegion\n", " # would be like:\n", " #This action captures the following output:
\n", "clusters_without_reserved_nodes
This action filters regions that have no clusters and creates a list of those that have them (without reserved cache nodes).
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "6a10e980-9f17-4436-9166-90ea130aa316", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-12T15:59:48.109Z" }, "jupyter": { "source_hidden": true }, "name": "Create List of Clusters without reserved nodes", "orderProperties": [], "tags": [], "title": "Create List of Clusters without reserved nodes" }, "outputs": [], "source": [ "all_clusters_without_reserved_nodes = []\n", "dummy = []\n", "for res in clusters_without_reserved_nodes:\n", " if type(res)==bool:\n", " if res == False:\n", " continue\n", " elif type(res)==list:\n", " if len(res)!=0:\n", " all_clusters_without_reserved_nodes=res\n", "print(all_clusters_without_reserved_nodes)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "978d3b61-2fd9-461d-89bd-534d2dcf3b63", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_clusters_without_reserved_nodes
This action Purchases Reserved Cache Nodes for the clusters found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "08a796e9-73bd-4969-97a7-214f062058e6", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ELASTICACHE" ], "actionDescription": "This action purchases a reserved cache node offering.", "actionEntryFunction": "aws_purchase_elasticcache_reserved_node", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": false, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Purchase ElastiCache Reserved Nodes", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "b3a50ef59c3ac1727671ecde28e9194c00857bd8c8b26546ea70606ddf8e6914", "collapsed": true, "continueOnError": false, "credentialsJson": {}, "description": "This action purchases a reserved cache node offering.", "id": 4, "index": 4, "inputData": [ { "no_of_nodes": { "constant": false, "value": "no_of_nodes" }, "region": { "constant": false, "value": "region" }, "reserved_node_offering_id": { "constant": false, "value": "reserved_cache_node_offering_id" } } ], "inputschema": [ { "properties": { "no_of_nodes": { "default": 1, "description": "The number of reserved cache nodes that you want to purchase.", "title": "No of nodes to purchase", "type": "integer" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "reserved_node_offering_id": { "description": "The unique identifier of the reserved cache node offering you want to purchase.", "title": "Reserved Cache Node Offering ID", "type": "string" } }, "required": [ "region", "reserved_node_offering_id" ], "title": "aws_purchase_elasticcache_reserved_node", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": 
"python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Purchase ElastiCache Reserved Nodes", "orderProperties": [ "region", "reserved_node_offering_id", "no_of_nodes" ], "printOutput": true, "tags": [ "aws_purchase_elasticcache_reserved_node" ], "uuid": "b3a50ef59c3ac1727671ecde28e9194c00857bd8c8b26546ea70606ddf8e6914", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2023 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_purchase_elasticcache_reserved_node_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_purchase_elasticcache_reserved_node(handle, region: str, reserved_node_offering_id: str, no_of_nodes:int=1) -> Dict:\n", " \"\"\"aws_purchase_elasticcache_reserved_node returns dict of response.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type reserved_node_offering_id: string\n", " :param reserved_node_offering_id: The unique identifier of the reserved node offering you want to purchase. 
Example: '438012d3-4052-4cc7-b2e3-8d3372e0e706'\n", "\n", " :type no_of_nodes: int\n", " :param no_of_nodes: The number of reserved nodes that you want to purchase.\n", "\n", " :rtype: dict of response metatdata of purchasing a reserved node\n", " \"\"\"\n", " try:\n", " elasticClient = handle.client('elasticache', region_name=region)\n", " params = {\n", " 'ReservedCacheNodesOfferingId': reserved_node_offering_id,\n", " 'CacheNodeCount': no_of_nodes\n", " }\n", " response = elasticClient.purchase_reserved_cache_nodes_offering(**params)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"reserved_node_offering_id\": \"reserved_cache_node_offering_id\",\n", " \"no_of_nodes\": int(no_of_nodes)\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_purchase_elasticcache_reserved_node, lego_printer=aws_purchase_elasticcache_reserved_node_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "44a6cf05-385b-445d-a503-ad4aa607a568", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "no_of_nodes, region, reserved_node_offering_id
In this Runbook, we were able to filter long running ElastiCache clusters without reserved nodes given a threshold number of days of creation and purchase nodes for them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Purchase Reserved Nodes For Long Running AWS ElastiCache Clusters", "parameters": [ "region", "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "no_of_nodes": { "default": 1, "description": "The number of cache node instances to reserve. The default value is 1 (node).", "title": "no_of_nodes", "type": "number" }, "region": { "description": "AWS Region to get the RDS Instances from. Eg: \"us-west-2\".", "title": "region", "type": "string" }, "reserved_cache_node_offering_id": { "description": "The ID of the reserved cache node offering to purchase. Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706", "title": "reserved_cache_node_offering_id", "type": "string" }, "threshold": { "default": 10, "description": "Threshold (in days) to find long running ElastiCache clusters. Eg: 30 , this will get all the clusters that have been running for more than 30 days. 
The default value is 10 days.", "title": "threshold", "type": "number" } }, "required": [ "reserved_cache_node_offering_id" ], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Purchase_Reserved_Cache_Nodes_For_Long_Running_ElastiCache_Clusters.json ================================================ { "name": "Purchase Reserved Nodes For Long Running AWS ElastiCache Clusters", "description": "Ensuring that long-running AWS ElastiCache clusters have Reserved Nodes purchased for them is an effective cost optimization strategy for AWS users. By committing to a specific capacity of ElastiCache nodes for a period of one or three years, users can take advantage of significant discounts compared to On-Demand pricing. This approach can help optimize AWS costs for ElastiCache clusters that are expected to run for an extended period and have predictable usage patterns. This runbook helps us optimize costs by ensuring that Reserved Nodes are purchased for these ElastiCache clusters.", "uuid": "51a0b15d932dddeea9b1991fb6299577756408ff7c47acc5dec3eb114e33562b", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_CLOUDOPS"], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Purchase_Reserved_Instances_For_Long_Running_RDS_Instances.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "5424264e-6195-4cf9-906b-24b02d5a83f3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Find Long Running AWS RDS Instances without Reserved Instances
2) Purchase Reserved Instance
Using unSkript's Find Long Running AWS RDS Instances without Reserved Instances action, we will find RDS DB Instances that have been running for longer than a specified threshold and do not have reserved instances purchased for them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region, threshold
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "a1efdab1-97ed-4d4d-bcab-5edd1eee6ffb", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_RDS" ], "actionDescription": "This action gets information about long running instances and their status, and checks if they have any reserved nodes associated with them.", "actionEntryFunction": "aws_get_long_running_rds_instances_without_reserved_instances", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Get Long Running RDS Instances Without Reserved Instances", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "77d61931741da6d2be410571e205c93962815430843b1fbaf8e575e6384598ae", "collapsed": true, "continueOnError": false, "credentialsJson": {}, "description": "This action gets information about long running instances and their status, and checks if they have any reserved nodes associated with them.", "execution_data": { "last_date_success_run_cell": "2023-05-12T16:34:16.408Z" }, "id": 15, "index": 15, "inputData": [ { "region": { "constant": false, "value": "region" }, "threshold": { "constant": false, "value": "threshold_int" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "AWS Region", "type": "string" }, "threshold": { "default": 10, "description": "Threshold(in days) to find long running RDS instances. 
Eg: 30, This will find all the instances that have been created a month ago.", "title": "Threshold(in days)", "type": "number" } }, "required": [], "title": "aws_get_long_running_rds_instances_without_reserved_instances", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Long Running RDS Instances Without Reserved Instances", "orderProperties": [ "region", "threshold" ], "outputParams": { "output_name": "clusters_without_reserved_instances", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "tags": [ "aws_get_long_running_rds_instances_without_reserved_instances" ], "uuid": "77d61931741da6d2be410571e205c93962815430843b1fbaf8e575e6384598ae", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "import pprint\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from datetime import datetime,timedelta, timezone\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_long_running_rds_instances_without_reserved_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def aws_get_long_running_rds_instances_without_reserved_instances(handle, region: str = \"\", threshold:int=10) -> Tuple:\n", " \"\"\"aws_get_long_running_rds_instances_without_reserved_instances Gets all DB instances that are not m5 or t3.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type threshold: int\n", " :param threshold: Threshold(in days) to find long running RDS 
instances. Eg: 30, This will find all the instances that have been created a month ago.\n", "\n", " :rtype: A tuple with a Status,and list of DB instances that don't have reserved instances\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " reservedInstancesPerRegion = {}\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " rdsClient = handle.client('rds', region_name=reg)\n", " response = rdsClient.describe_reserved_nodes()\n", " reservedInstancesPerType = {}\n", " if response['ReservedDBInstances']:\n", " for ins in response['ReservedDBInstances']:\n", " reservedInstancesPerRegion[ins['DBInstanceClass']] = True\n", " else:\n", " continue\n", " reservedInstancesPerRegion[reg] = reservedInstancesPerType\n", " except Exception:\n", " pass\n", " for reg in all_regions:\n", " try:\n", " rdsClient = handle.client('rds', region_name=reg)\n", " response = aws_get_paginator(rdsClient, \"describe_db_instances\", \"DBInstances\")\n", " for instance in response:\n", " if instance['DBInstanceStatus'] == 'available':\n", " uptime = datetime.now(timezone.utc) - instance['InstanceCreateTime']\n", " if uptime > timedelta(days=threshold):\n", " # Check if the cluster node type is present in the reservedInstancesPerRegion map.\n", " reservedInstances = reservedInstancesPerRegion.get(reg)\n", " if reservedInstances != None:\n", " if reservedInstances.get(instance['DBInstanceClass']) == True:\n", " continue\n", " db_instance_dict = {}\n", " db_instance_dict[\"region\"] = reg\n", " db_instance_dict[\"instance_type\"] = instance['DBInstanceClass']\n", " db_instance_dict[\"instance\"] = instance['DBInstanceIdentifier']\n", " result.append(db_instance_dict)\n", " except Exception:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": 
\"region\",\n", " \"threshold\": \"int(threshold_int)\"\n", " }''')\n", "\n", "task.configure(outputName=\"clusters_without_reserved_instances\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_long_running_rds_instances_without_reserved_instances, lego_printer=aws_get_long_running_rds_instances_without_reserved_instances_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "3f369bc9-53d0-44c8-af50-80ba7885c657", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "clusters_without_reserved_instances
This action filters regions that have no clusters and creates a list of those that have them (without reserved instances).
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "6a10e980-9f17-4436-9166-90ea130aa316", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-12T16:34:22.299Z" }, "jupyter": { "source_hidden": true }, "name": "Create List of Low CPU Utilization RDS Instances", "orderProperties": [], "tags": [], "title": "Create List of Low CPU Utilization RDS Instances" }, "outputs": [], "source": [ "all_clusters_without_reserved_instances = []\n", "dummy = []\n", "for res in clusters_without_reserved_instances:\n", " if type(res)==bool:\n", " if res == False:\n", " continue\n", " elif type(res)==list:\n", " if len(res)!=0:\n", " all_clusters_without_reserved_instances=res\n", "print(all_clusters_without_reserved_instances)" ] }, { "cell_type": "markdown", "id": "978d3b61-2fd9-461d-89bd-534d2dcf3b63", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_clusters_without_reserved_instances
This action Purchases Reserved Instances for the clusters found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "b1a73789-b8a6-4f04-97b8-09d784a8a916", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_RDS" ], "actionDescription": "This action purchases a reserved DB instance offering.", "actionEntryFunction": "aws_purchase_rds_reserved_instance", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": false, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Purchase RDS Reserved Instances", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "e38b3b31c357018f66d779266a5f1692dda78556eb22eb02e3acaf9ad2d69b3d", "collapsed": true, "continueOnError": false, "credentialsJson": {}, "description": "This action purchases a reserved DB instance offering.", "id": 4, "index": 4, "inputData": [ { "db_instance_count": { "constant": false, "value": "db_instance_count" }, "region": { "constant": false, "value": "region" }, "reserved_instance_offering_id": { "constant": false, "value": "reserved_instance_offering_id" } } ], "inputschema": [ { "properties": { "db_instance_count": { "default": 1, "description": "The number of instances to reserve.", "title": "Instance Count", "type": "integer" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "reserved_instance_offering_id": { "description": "The ID of the Reserved DB instance offering to purchase. 
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706", "title": "Reserved Instance Offering ID", "type": "string" } }, "required": [ "region", "reserved_instance_offering_id" ], "title": "aws_purchase_rds_reserved_instance", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Purchase RDS Reserved Instances", "orderProperties": [ "region", "reserved_instance_offering_id", "db_instance_count" ], "printOutput": true, "tags": [ "aws_purchase_rds_reserved_instance" ], "uuid": "e38b3b31c357018f66d779266a5f1692dda78556eb22eb02e3acaf9ad2d69b3d", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2023 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_purchase_rds_reserved_instance_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_purchase_rds_reserved_instance(handle, region: str, reserved_instance_offering_id: str, db_instance_count:int=1) -> Dict:\n", " \"\"\"aws_purchase_rds_reserved_instance returns dict of response.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :type reserved_instance_offering_id: string\n", " :param reserved_instance_offering_id: The unique identifier of the reserved instance offering you want to purchase.\n", "\n", " :type db_instance_count: int\n", " :param db_instance_count: The number of reserved instances that you want to purchase.\n", "\n", " :rtype: dict of response metatdata of purchasing a reserved instance\n", " \"\"\"\n", " try:\n", " redshiftClient = handle.client('redshift', region_name=region)\n", " params = {\n", " 'ReservedDBInstancesOfferingId': reserved_instance_offering_id,\n", " 'DBInstanceCount': db_instance_count\n", " }\n", " response = 
redshiftClient.purchase_reserved_db_instances_offering(**params)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"reserved_instance_offering_id\": \"reserved_instance_offering_id\",\n", " \"db_instance_count\": int(db_instance_count)\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_purchase_rds_reserved_instance, lego_printer=aws_purchase_rds_reserved_instance_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "44a6cf05-385b-445d-a503-ad4aa607a568", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "db_instance_count, region, reserved_instance_offering_id
In this Runbook, we were able to filter long running RDS instances without reserved nodes given a threshold number of days of creation and purchase instances for them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Purchase Reserved Instances For Long Running AWS RDS Instances", "parameters": [ "region", "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "db_instance_count": { "default": 1, "description": "Number of reserved instances to create. The default value is 1.", "title": "db_instance_count", "type": "number" }, "region": { "description": "AWS region. Eg: \"us-west-2\"", "title": "region", "type": "string" }, "reserved_instance_offering_id": { "description": "The ID of the reserved instance offering to purchase. Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706", "title": "reserved_instance_offering_id", "type": "string" }, "threshold": { "default": 10, "description": "Threshold (in days) to find long running RDS Instances. Eg: 30 , this will get all the instances that have been running for more than 30 days. 
The default value is 10 days.", "title": "threshold", "type": "number" } }, "required": [ "reserved_instance_offering_id" ], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Purchase_Reserved_Instances_For_Long_Running_RDS_Instances.json ================================================ { "name": "Purchase Reserved Instances For Long Running AWS RDS Instances", "description": "Ensuring that long-running AWS RDS instances have Reserved Instances purchased for them is an important cost optimization strategy for AWS users. By committing to a specific capacity of RDS instances for a period of one or three years, users can take advantage of significant discounts compared to On-Demand pricing. This approach can help optimize AWS costs for RDS instances that are expected to run for an extended period and have predictable usage patterns. This runbook helps us to optimize costs by ensuring that Reserved Instances are purchased for these RDS instances.", "uuid": "e0ff270a41b65b1804da257ffec5fbdec7dd51bdb3da925cced7fa3391bfe70b", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_CLOUDOPS"], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Purchase_Reserved_Nodes_For_Long_Running_Redshift_Clusters.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "5424264e-6195-4cf9-906b-24b02d5a83f3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Find Long Running AWS Redshift Clusters without Reserved Clusters
2) Purchase Reserved Node
Using unSkript's Find Long Running AWS Redshift Clusters without Reserved Nodes action, we will find clusters that have been running for longer than a specified threshold and do not have reserved nodes purchased for them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region, threshold
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "c2b68fa5-a047-4e34-afa7-b016cb5843b7", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_REDSHIFT" ], "actionDescription": "This action gets information about running clusters and their status, and checks if they have any reserved nodes associated with them.", "actionEntryFunction": "aws_get_long_running_redshift_clusters_without_reserved_nodes", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Get Long Running Redshift Clusters Without Reserved Nodes", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "04cd063254d5417f558b574e5ae0e90f5a576397b2ce63a53fbb3125b2f99791", "collapsed": true, "continueOnError": false, "credentialsJson": {}, "description": "This action gets information about running clusters and their status, and checks if they have any reserved nodes associated with them.", "execution_data": { "last_date_success_run_cell": "2023-05-12T16:53:09.999Z" }, "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" }, "threshold": { "constant": false, "value": "threshold_int" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region to get the Redshift Cluster", "title": "AWS Region", "type": "string" }, "threshold": { "default": 10, "description": "Threshold(in days) to find long running redshift clusters. 
Eg: 30, This will find all the clusters that have been created a month ago.", "title": "Threshold(in days)", "type": "number" } }, "required": [], "title": "aws_get_long_running_redshift_clusters_without_reserved_nodes", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Long Running Redshift Clusters Without Reserved Nodes", "orderProperties": [ "region", "threshold" ], "outputParams": { "output_name": "clusters_without_reserved_nodes", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "tags": [ "aws_get_long_running_redshift_clusters_without_reserved_nodes" ], "uuid": "04cd063254d5417f558b574e5ae0e90f5a576397b2ce63a53fbb3125b2f99791", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Tuple\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pprint\n", "from datetime import datetime,timedelta, timezone\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_long_running_redshift_clusters_without_reserved_nodes_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_long_running_redshift_clusters_without_reserved_nodes(handle, region: str = \"\", threshold:int = 10) -> Tuple:\n", " \"\"\"aws_get_long_running_redshift_clusters_without_reserved_nodes finds Redshift Clusters that are long running and have no reserved nodes\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: Region of the Cluster.\n", "\n", " :type threshold: integer\n", " :param threshold: Threshold(in days) to find long running redshift 
clusters. Eg: 30, This will find all the clusters that have been created a month ago.\n", "\n", " :rtype: status, list of clusters, nodetype and their region.\n", " \"\"\"\n", " result = []\n", " reservedNodesPerRegion = {}\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " redshiftClient = handle.client('redshift', region_name=reg)\n", " response = redshiftClient.describe_reserved_nodes()\n", " reservedNodesPerType = {}\n", " if response['ReservedNodes']:\n", " for node in response['ReservedNodes']:\n", " reservedNodesPerType[node['NodeType']] = True\n", " else:\n", " continue\n", " reservedNodesPerRegion[reg] = reservedNodesPerType\n", " except Exception:\n", " pass\n", " for reg in all_regions:\n", " try:\n", " redshiftClient = handle.client('redshift', region_name=reg)\n", " for cluster in redshiftClient.describe_clusters()['Clusters']:\n", " cluster_age = datetime.now(timezone.utc) - cluster['ClusterCreateTime']\n", " if cluster['ClusterStatus'] == 'available' and cluster_age > timedelta(days=threshold):\n", " # Check if the cluster node type is present in the reservedNodesPerRegion map.\n", " reservedNodes = reservedNodesPerRegion.get(reg)\n", " if reservedNodes != None:\n", " if reservedNodes.get(cluster['NodeType']) == True:\n", " continue\n", " cluster_dict = {}\n", " cluster_dict[\"region\"] = reg\n", " cluster_dict[\"cluster\"] = cluster['ClusterIdentifier']\n", " cluster_dict[\"node_type\"] = cluster['NodeType']\n", " result.append(cluster_dict)\n", " except Exception:\n", " pass\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"threshold\": \"int(threshold_int)\"\n", " }''')\n", "task.configure(outputName=\"clusters_without_reserved_nodes\")\n", "\n", "task.configure(printOutput=True)\n", "(err, 
hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_long_running_redshift_clusters_without_reserved_nodes, lego_printer=aws_get_long_running_redshift_clusters_without_reserved_nodes_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "199591ef-cb3a-49b7-b515-3c6998050320", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "clusters_without_reserved_nodes
This action filters regions that have no clusters and creates a list of those that have them (without reserved nodes).
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "6a10e980-9f17-4436-9166-90ea130aa316", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-12T16:53:13.534Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Clusters without reserved nodes", "orderProperties": [], "tags": [], "title": "Create List of Clusters without reserved nodes" }, "outputs": [], "source": [ "all_clusters_without_reserved_nodes = []\n", "dummy = []\n", "for res in clusters_without_reserved_nodes:\n", " if type(res)==bool:\n", " if res == False:\n", " continue\n", " elif type(res)==list:\n", " if len(res)!=0:\n", " all_clusters_without_reserved_nodes=res\n", "print(all_clusters_without_reserved_nodes)" ] }, { "cell_type": "markdown", "id": "978d3b61-2fd9-461d-89bd-534d2dcf3b63", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_clusters_without_reserved_nodes
This action Purchases Reserved Nodes for the clusters found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "5528b411-1a01-4230-af26-014ad7e951e2", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_REDSHIFT" ], "actionDescription": "This action purchases reserved nodes. Amazon Redshift offers a predefined set of reserved node offerings. You can purchase one or more of the offerings.", "actionEntryFunction": "aws_purchase_redshift_reserved_node", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Purchase Redshift Reserved Nodes", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "86e0a0ac26eb1973118755e8dded5fa2ee4af6a9a501f7eeeda2917933d7a9f1", "continueOnError": false, "credentialsJson": {}, "description": "This action purchases reserved nodes. Amazon Redshift offers a predefined set of reserved node offerings. 
You can purchase one or more of the offerings.", "id": 17, "index": 17, "inputData": [ { "no_of_nodes": { "constant": false, "value": "no_of_nodes" }, "region": { "constant": false, "value": "region" }, "reserved_node_offering_id": { "constant": false, "value": "reserved_node_offering_id" } } ], "inputschema": [ { "properties": { "no_of_nodes": { "default": 1, "description": "The number of reserved nodes that you want to purchase.", "title": "No od Nodes to reserve", "type": "integer" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "reserved_node_offering_id": { "description": "The unique identifier of the reserved node offering you want to purchase.", "title": "Reserved Node Offering ID", "type": "string" } }, "required": [ "region", "reserved_node_offering_id" ], "title": "aws_purchase_redshift_reserved_node", "type": "object" } ], "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Purchase Redshift Reserved Nodes", "orderProperties": [ "region", "reserved_node_offering_id", "no_of_nodes" ], "printOutput": true, "tags": [ "aws_purchase_redshift_reserved_node" ], "uuid": "86e0a0ac26eb1973118755e8dded5fa2ee4af6a9a501f7eeeda2917933d7a9f1", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2023 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Dict\n", "import pprint\n", "\n", "\n", "\n", "from typing import Optional\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_purchase_redshift_reserved_node_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_purchase_redshift_reserved_node(handle, region: str, reserved_node_offering_id: str, no_of_nodes:int=1) -> Dict:\n", " \"\"\"aws_purchase_redshift_reserved_node returns dict of response.\n", "\n", " :type region: string\n", " :param 
region: AWS Region.\n", "\n", " :type reserved_node_offering_id: string\n", " :param reserved_node_offering_id: The unique identifier of the reserved node offering you want to purchase.\n", "\n", " :type no_of_nodes: int\n", " :param no_of_nodes: The number of reserved nodes that you want to purchase.\n", "\n", " :rtype: dict of response metatdata of purchasing a reserved node\n", " \"\"\"\n", " try:\n", " redshiftClient = handle.client('redshift', region_name=region)\n", " params = {\n", " 'ReservedNodeOfferingId': reserved_node_offering_id,\n", " 'NodeCount': no_of_nodes\n", " }\n", " response = redshiftClient.purchase_reserved_node_offering(**params)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"reserved_node_offering_id\": \"reserved_node_offering_id\",\n", " \"no_of_nodes\": int(no_of_nodes)\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_purchase_redshift_reserved_node, lego_printer=aws_purchase_redshift_reserved_node_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "44a6cf05-385b-445d-a503-ad4aa607a568", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "no_of_nodes, region, reserved_node_offering_id
In this Runbook, we were able to filter long running Redshift clusters without reserved nodes given a threshold number of days of creation and purchase nodes for them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Purchase Reserved Nodes For Long Running AWS Redshift Clusters", "parameters": [ "region", "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "no_of_nodes": { "default": 1, "description": "The number of cache node instances to reserve. The default value is 1 (node).", "title": "no_of_nodes", "type": "number" }, "region": { "description": "AWS region. Eg: 'us-west-2'", "title": "region", "type": "string" }, "reserved_node_offering_id": { "description": "The ID of the reserved node offering to purchase. Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706", "title": "reserved_node_offering_id", "type": "string" }, "threshold": { "default": 10, "description": "Threshold (in days) to find long running Redshift clusters. Eg: 30 , this will get all the clusters that have been running for more than 30 days. 
The default value is 10 days.", "title": "threshold", "type": "number" } }, "required": [ "reserved_node_offering_id" ], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Purchase_Reserved_Nodes_For_Long_Running_Redshift_Clusters.json ================================================ { "name": "Purchase Reserved Nodes For Long Running AWS Redshift Clusters", "description": "Ensuring that long-running AWS Redshift Clusters have Reserved Nodes purchased for them is a critical cost optimization strategy . By committing to a specific capacity of Redshift nodes for a period of one or three years, users can take advantage of significant discounts compared to On-Demand pricing. This approach can help optimize AWS costs for Redshift Clusters that are expected to run for an extended period and have predictable usage patterns. This runbook helps us to ensure that Reserved Nodes are purchased for these clusters so that users can effectively plan ahead, reduce their AWS bill, and optimize their costs over time.", "uuid": "08d3033e428c5fa241be26cfc8787fb16c05c6aa31830075e730fefd5aaf744f", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_CLOUDOPS"], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Release_Unattached_Elastic_IPs.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "82eebdfd-c880-40df-bd6d-5b546c92164b", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Find unattached Elastic IPs
2) Delete unattached Elastic IPs
Using unSkript's Find unattached Elastic IPs action, we will find unattached Elastic IPs which don't have any instances associated with them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "36acabd0-68b0-4fe8-adf5-39db2cf00962", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2" ], "actionDescription": "This action lists Elastic IP address and check if it is associated with an instance or network interface.", "actionEntryFunction": "aws_list_unattached_elastic_ips", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "a9d7ea5f3d31745f1de9fb8616ab6fbc20ff11e665808bdde6a9ba9b8b32e28a" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS List Unattached Elastic IPs", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "9f378662591138c29993d482db1c391aa2d154ffc7142b27824dc2766a5e2a69", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "This action lists Elastic IP address and check if it is associated with an instance or network interface.", "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_list_unattached_elastic_ips", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS List Unattached Elastic IPs", "orderProperties": [ "region" ], "outputParams": { "output_name": "unused_ips", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not allocation_ips", "tags": [ "aws_list_unattached_elastic_ips" ], "uuid": 
"9f378662591138c29993d482db1c391aa2d154ffc7142b27824dc2766a5e2a69", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "import pprint\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_list_unattached_elastic_ips_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_list_unattached_elastic_ips(handle, region: str = \"\") -> Tuple:\n", " \"\"\"aws_list_unattached_elastic_ips Returns an array of unattached elastic IPs.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :rtype: Tuple with status result and list of unattached elastic IPs.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " # Filtering the public_ip by region\n", " ec2Client = handle.client('ec2', region_name=reg)\n", " all_eips = ec2Client.describe_addresses()\n", " for eip in all_eips[\"Addresses\"]:\n", " vpc_data = {}\n", " if 'AssociationId' not in eip:\n", " vpc_data[\"public_ip\"] = eip['PublicIp']\n", " vpc_data[\"allocation_id\"] = eip['AllocationId']\n", " vpc_data[\"region\"] = reg\n", " result.append(vpc_data)\n", " except Exception:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not allocation_ips\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"unused_ips\")\n", "\n", 
"task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_list_unattached_elastic_ips, lego_printer=aws_list_unattached_elastic_ips_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "a311041f-620a-4b6b-914f-e52c6c3a71f4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "unused_ips
This action filters regions that have no unattached Elastic IPs and creates a list of those that have them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": 28, "id": "b85ce542-bdf0-44d2-9e75-213002d5c036", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-20T10:16:03.026Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Unallocated Elastic IPs", "orderProperties": [], "tags": [], "title": "Create List of Unallocated Elastic IPs" }, "outputs": [], "source": [ "all_unused_ips = []\n", "try:\n", " if unused_ips[0] == False:\n", " if len(unused_ips[1])!=0:\n", " all_unused_ips=unused_ips[1]\n", "except Exception:\n", " for ids in allocation_ids:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"allocation_id\"] = ids\n", " all_unused_ips.append(data_dict)\n", "print(all_unused_ips)" ] }, { "cell_type": "markdown", "id": "9fb3704a-9b19-49c4-96ab-a982217bbcd3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_unused_ips
This action deletes unattached Elastic IPs found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "4ca7a324-cd13-41d6-888f-643709c35d21", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS" ], "actionDescription": "AWS Release Elastic IP for both VPC and Standard", "actionEntryFunction": "aws_release_elastic_ip", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Release Elastic IP", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "20a5f7f3c28da1a98b78fdbc2ca582dd30c1b5a3f57bcfc9da691a3182a332c3", "collapsed": true, "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "AWS Release Elastic IP for both VPC and Standard", "id": 2, "index": 2, "inputData": [ { "allocation_id": { "constant": false, "value": "\"iter.get(\\\\\"allocation_id\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "allocation_id": { "description": "Allocation ID of the Elastic IP to release.", "title": "Allocation ID", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [ "allocation_id", "region" ], "title": "aws_release_elastic_ip", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "allocation_id": "allocation_id", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_unused_ips" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Release Elastic IP", "orderProperties": [ "allocation_id", "region" ], "printOutput": 
true, "startcondition": "len(all_unused_ips)!=0", "tags": [ "aws_release_elastic_ip" ], "uuid": "20a5f7f3c28da1a98b78fdbc2ca582dd30c1b5a3f57bcfc9da691a3182a332c3", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2023 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_release_elastic_ip_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_release_elastic_ip(handle, region: str, allocation_id: str) -> Dict:\n", " \"\"\"aws_release_elastic_ip release elastic ip.\n", "\n", " :type allocation_id: string\n", " :param allocation_id: Allocation ID of the Elastic IP to release.\n", "\n", " :type region: string\n", " :param region: AWS Region.\n", "\n", " :rtype: Dict with the release elastic ip info.\n", " \"\"\"\n", " try:\n", " ec2_Client = handle.client('ec2', region_name=region)\n", " response = ec2_Client.release_address(AllocationId=allocation_id)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"allocation_id\": \"iter.get(\\\\\"allocation_id\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_unused_ips\",\n", " \"iter_parameter\": [\"allocation_id\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_unused_ips)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_release_elastic_ip, 
lego_printer=aws_release_elastic_ip_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "9c7430c8-3660-45bd-90ef-9ceab77e3daa", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "region, elastic_ip
In this Runbook, we were able to check for any unattached Elastic IP (EIP) addresses in our AWS account and release (remove) them in order to lower the cost of your monthly AWS bill. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Release Unattached AWS Elastic IPs", "parameters": null }, "kernelspec": { "display_name": "unSkript (Build: 1169)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "allocation_ids": { "description": "List of IDs that AWS assigns to represent the allocation of the Elastic IP address for use with instances in a VPC.", "title": "allocation_ids", "type": "array" }, "region": { "description": "AWS Region to search for unattached Elastic IPs. Eg: \"us-west-2\". If left blank, all regions will be considered.", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Release_Unattached_Elastic_IPs.json ================================================ { "name": "Release Unattached AWS Elastic IPs", "description": "A disassociated Elastic IP address remains allocated to your account until you explicitly release it. AWS imposes a small hourly charge for Elastic IP addresses that are not associated with a running instance. 
This runbook can be used to deleted those unattached AWS Elastic IP addresses.", "uuid": "a9d7ea5f3d31745f1de9fb8616ab6fbc20ff11e665808bdde6a9ba9b8b32e28a", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Remediate_unencrypted_S3_buckets.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "cbabc8b5-57b4-45b8-890c-370bb1ed6f02", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "Here we will use unSkript Filter Unencrypted S3 Buckets action. This action filters all the S3 buckets from the given region and returns a list of those S3 buckets without encryption. It will execute if the bucket_name parameter is not given.
\n", "\n", "\n", "Input parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "1bd5211a-2ef5-4796-bdf6-231080e966d8", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_S3" ], "actionDescription": "Filter AWS Unencrypted S3 Buckets", "actionEntryFunction": "aws_filter_unencrypted_s3_buckets", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "50d9c6abd7dce3ff9183d4135353e82859bc5a9639455b35bd229331be6048df" ], "actionNextHopParameterMapping": { "bucket_name": ".[].bucket", "region": ".[0].region" }, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Filter AWS Unencrypted S3 Buckets", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "2fa5c0d3a9ed5951fbf2a1390610941af8e145521c244fa07b597d6ca6665a43", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Filter AWS Unencrypted S3 Buckets", "execution_data": { "last_date_success_run_cell": "2023-05-17T16:39:37.314Z" }, "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_filter_unencrypted_s3_buckets", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Filter AWS Unencrypted S3 Buckets", "orderProperties": [ "region" ], "outputParams": { "output_name": "unencrypted_buckets", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not bucket_names", "tags": [], "uuid": 
"2fa5c0d3a9ed5951fbf2a1390610941af8e145521c244fa07b597d6ca6665a43", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Tuple\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from botocore.exceptions import ClientError\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_unencrypted_s3_buckets_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_filter_unencrypted_s3_buckets(handle, region: str = \"\") -> Tuple:\n", " \"\"\"aws_filter_unencrypted_s3_buckets List of unencrypted S3 bucket name .\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: Filter S3 buckets.\n", "\n", " :rtype: Tuple with status result and list of unencrypted S3 bucket name.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " s3Client = handle.client('s3', region_name=reg)\n", " response = s3Client.list_buckets()\n", " # List unencrypted S3 buckets\n", " for bucket in response['Buckets']:\n", " try:\n", " response = s3Client.get_bucket_encryption(Bucket=bucket['Name'])\n", " encRules = response['ServerSideEncryptionConfiguration']['Rules']\n", " except ClientError as e:\n", " bucket_dict = {}\n", " bucket_dict[\"region\"] = reg\n", " bucket_dict[\"bucket\"] = bucket['Name']\n", " result.append(bucket_dict)\n", " except Exception as error:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " 
\"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not bucket_names\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"unencrypted_buckets\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_unencrypted_s3_buckets, lego_printer=aws_filter_unencrypted_s3_buckets_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "f2ed3b50-50f4-4983-b409-690aecf27b1c", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "unencrypted_buckets
In this action, we modify the output from step 1 and return a list of dictionary items for the Unencrypted S3 Buckets
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "47117b25-2533-4021-b4f3-329b7fee165e", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-10T10:31:04.455Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Modify Step-1 Output", "orderProperties": [], "tags": [], "title": "Modify Step-1 Output" }, "outputs": [], "source": [ "bucket_list = []\n", "\n", "try:\n", " if unencrypted_buckets[0] == False:\n", " for bucket in unencrypted_buckets[1]:\n", " bucket_list.append(bucket)\n", "except Exception as e:\n", " if bucket_names:\n", " for i in bucket_names:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"bucket\"] = i\n", " bucket_list.append(data_dict)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "0a1ba685-0340-4af8-9bc7-32e9beff2837", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable: ebs_list
\n", "
Here we will use the unSkript Apply AWS Default Encryption for the S3 Buckets action. In this action, we will apply the default encryption configuration to the unencrypted S3 buckets by passing the list of unencrypted S3 buckets from step 1.
\n", "\n", "\n", "Input parameters:
\n", "name,region
\n", "" ] }, { "cell_type": "code", "execution_count": 2, "id": "80b2e9a4-023a-4235-99ba-dce06988eb6e", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "eb57da3b21aec38d005bf0355a48ba53937c7ac62f98e9c968c9501412d72008", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Apply a New AWS Policy for S3 Bucket", "execution_data": { "last_date_success_run_cell": "2022-08-26T20:00:28.237Z" }, "id": 135, "index": 135, "inputData": [ { "name": { "constant": false, "value": "\"iter.get(\\\\\"bucket\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "name": { "default": "", "description": "Name of the bucket.", "title": "Bucket name", "type": "string" }, "region": { "default": "", "description": "AWS region of the bucket.", "title": "Region", "type": "string" } }, "required": [ "name", "policy", "region" ], "title": "aws_put_bucket_policy", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "name": "bucket", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "bucket_list" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Apply AWS Default Encryption for S3 Bucket", "nouns": [ "aws", "policy", "bucket" ], "orderProperties": [ "name", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "apply_output", "output_name_enabled": true }, "printOutput": true, "startcondition": "len(bucket_list) > 0", "tags": [ "aws_put_bucket_policy" ], "title": "Apply AWS Default Encryption for S3 Bucket", "verbs": [ "apply" ] }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", 
"##\n", "from pydantic import BaseModel, Field\n", "from typing import Dict\n", "import json\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_put_bucket_encryption_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_put_bucket_encryption(handle, name: str, region: str) -> Dict:\n", " \"\"\"aws_put_bucket_encryption Puts default encryption configuration for bucket.\n", "\n", " :type name: string\n", " :param name: NAme of the S3 bucket.\n", "\n", " :type region: string\n", " :param region: location of the bucket\n", "\n", " :rtype: Dict with the response info.\n", " \"\"\"\n", " s3Client = handle.client('s3',\n", " region_name=region)\n", "\n", " # Setup default encryption configuration \n", " response = s3Client.put_bucket_encryption(\n", " Bucket=name,\n", " ServerSideEncryptionConfiguration={\n", " \"Rules\": [\n", " {\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"AES256\"}}\n", " ]},\n", " )\n", " return response\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"name\": \"iter.get(\\\\\"bucket\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"bucket_list\",\n", " \"iter_parameter\": [\"name\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(bucket_list) > 0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"apply_output\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_put_bucket_encryption, lego_printer=aws_put_bucket_encryption_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", 
"id": "dea3003f-03e9-4dff-86fb-b4073ee4ef79", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "Output variable:
\n", "apply_output
In this Runbook, we demonstrated the use of unSkript's AWS legos to filter all unencrypted S3 buckets and apply default encryption configuration to the buckets. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Remediate unencrypted S3 buckets", "parameters": [ "bucket_name", "region" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "bucket_names": { "description": "list of S3 bucket Names", "title": "bucket_names", "type": "string" }, "region": { "description": "AWS Region e.g. us-west-2", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "5e269198fab4eb2ea6fe7c886c38b87b334869f0501ab924e1d16d60aeba5d23" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Remediate_unencrypted_S3_buckets.json ================================================ { "name": "Remediate unencrypted S3 buckets", "description": "This runbook can be used to filter all the S3 buckets which are unencrypted and apply encryption on unencrypted S3 buckets.", "uuid": "50d9c6abd7dce3ff9183d4135353e82859bc5a9639455b35bd229331be6048df", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS","CATEGORY_TYPE_SECOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Renew_SSL_Certificate.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "b18495bb-19ba-4b43-9824-8739dd304b90", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) List expiring ACM certificates
2) Renew expiring ACM certificates
Using unSkript's List expiring ACM certificates action, we will fetch all the expiring certificates given a specific number of threshold days.
\n", "\n", "\n", "This action takes the following parameters:
\n", "threshold_days,region(Optional)
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "4087f95e-aca3-4eb9-95c0-acf50a778c5a", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ACM" ], "actionDescription": "List All Expiring ACM Certificates", "actionEntryFunction": "aws_list_expiring_acm_certificates", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "76681732b20a69913f0d9248272271bf2f4ab6459498ec6d0ab055870e0db0bb" ], "actionNextHopParameterMapping": {}, "actionNouns": [ "expiring", "certificates", "aws" ], "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "List Expiring ACM Certificates", "actionType": "LEGO_TYPE_AWS", "actionVerbs": [ "list" ], "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "c1ee1c3b5cb0e07f0b52ca4d853aba6b3e597882e785ea054f95d69c03d83973", "collapsed": true, "continueOnError": false, "credentialsJson": {}, "description": "List All Expiring ACM Certificates", "id": 4, "index": 4, "inputData": [ { "region": { "constant": false, "value": "region" }, "threshold_days": { "constant": false, "value": "int(threshold_days)" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "Name of the AWS Region", "title": "Region", "type": "string" }, "threshold_days": { "description": "Threshold number(in days) to check for expiry. 
Eg: 30 -lists all certificates which are expiring within 30 days", "title": "Threshold Days", "type": "integer" } }, "required": [ "threshold_days" ], "title": "aws_list_expiring_acm_certificates", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "List Expiring ACM Certificates", "orderProperties": [ "threshold_days", "region" ], "outputParams": { "output_name": "expiring_certificates", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "tags": [ "aws_list_expiring_acm_certificates" ], "uuid": "c1ee1c3b5cb0e07f0b52ca4d853aba6b3e597882e785ea054f95d69c03d83973", "version": "1.0.0" }, "outputs": [], "source": [ "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import pprint\n", "from typing import Optional,Tuple\n", "import datetime\n", "import dateutil\n", "from pydantic import BaseModel, Field\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_list_expiring_acm_certificates_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def aws_list_expiring_acm_certificates(handle, threshold_days: int = 90, region: str=None)-> Tuple:\n", " \"\"\"aws_list_expiring_acm_certificates returns all the ACM issued certificates which\n", " are about to expire given a threshold number of days\n", "\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :type threshold_days: int\n", " :param threshold_days: Threshold number of days to check for expiry.\n", " Eg: 30 -lists all certificates which are expiring within 30 days\n", "\n", " :type region: str\n", " :param region: Region name of the AWS account\n", "\n", " :rtype: Tuple containing status, expiring certificates, and error\n", " \"\"\"\n", " 
arn_list=[]\n", " domain_list = []\n", " expiring_certificates_list= []\n", " expiring_certificates_dict={}\n", " result_list=[]\n", " all_regions = [region]\n", " if region is None or len(region)==0:\n", " all_regions = aws_list_all_regions(handle=handle)\n", " for r in all_regions:\n", " iamClient = handle.client('acm', region_name=r)\n", " try:\n", " expiring_certificates_dict={}\n", " certificates_list = iamClient.list_certificates(CertificateStatuses=['ISSUED'])\n", " for each_arn in certificates_list['CertificateSummaryList']:\n", " arn_list.append(each_arn['CertificateArn'])\n", " domain_list.append(each_arn['DomainName'])\n", " for cert_arn in arn_list:\n", " details = iamClient.describe_certificate(CertificateArn=cert_arn)\n", " for key,value in details['Certificate'].items():\n", " if key == \"NotAfter\":\n", " expiry_date = value\n", " right_now = datetime.datetime.now(dateutil.tz.tzlocal())\n", " diff = expiry_date-right_now\n", " days_remaining = diff.days\n", " if 0 < days_remaining < threshold_days:\n", " expiring_certificates_list.append(cert_arn)\n", " expiring_certificates_dict[\"region\"]= r\n", " expiring_certificates_dict[\"certificate\"]= expiring_certificates_list\n", " if len(expiring_certificates_list)!=0:\n", " result_list.append(expiring_certificates_dict)\n", " except Exception:\n", " pass\n", " if len(result_list)!=0:\n", " return (False, result_list)\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"threshold_days\": \"int(threshold_days)\",\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(outputName=\"expiring_certificates\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_list_expiring_acm_certificates, lego_printer=aws_list_expiring_acm_certificates_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "33e6d07d-2168-44d1-99fe-32539f26758f", "metadata": 
{ "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "expiring_certificates
This action filters regions that have no certificates and creates a list of certificates that have to be renewed
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": 6, "id": "999b1c0b-701f-4207-b80f-2a5a1ce7578d", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-02T16:16:02.763Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Expiring Certificates", "orderProperties": [], "tags": [], "title": "Create List of Expiring Certificates" }, "outputs": [], "source": [ "all_expiring_certificates = []\n", "try:\n", " if expiring_certificates[0] == False:\n", " if len(expiring_certificates[1])!=0:\n", " all_expiring_certificates=expiring_certificates[1]\n", "except Exception:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"certificate\"] = certificate_arns\n", " all_expiring_certificates.append(data_dict)\n", "print(all_expiring_certificates)" ] }, { "cell_type": "markdown", "id": "45f6a4b4-f896-4e37-9fb6-3c6db915495e", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_expiring_certificates
This action renews eligible SSL certificates that are available on ACM. Only exported private certificates can be renewed with this operation. In order to renew your AWS Private CA certificates with ACM, you must first grant the ACM service principal permission to do so.
A certificate is eligible for automatic renewal subject to the following considerations:
1) ELIGIBLE if associated with another AWS service, such as Elastic Load Balancing or CloudFront.
2) ELIGIBLE if exported since being issued or last renewed.
3) ELIGIBLE if it is a private certificate issued by calling the ACM RequestCertificate API and then exported or associated with another AWS service.
4) ELIGIBLE if it is a private certificate issued through the management console and then exported or associated with another AWS service.
5) NOT ELIGIBLE if it is a private certificate issued by calling the AWS Private CA IssueCertificate API.
6) NOT ELIGIBLE if imported or already expired
\n", "\n", "This action takes the following parameters:
\n", "aws_certificate_arn,region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "dd7da102-0ea1-4d13-a87b-c4e7af382228", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ACM" ], "actionDescription": "Renew Expiring ACM Certificates", "actionEntryFunction": "aws_renew_expiring_acm_certificates", "actionIsCheck": false, "actionIsRemediation": true, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": [ "certificates", "acm", "aws" ], "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Renew Expiring ACM Certificates", "actionType": "LEGO_TYPE_AWS", "actionVerbs": [ "renew" ], "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "89773c9cb2201505fbf5dbac0cc34a4056ba1a45a315addffec9af7a4b9b7390", "collapsed": true, "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Renew Expiring ACM Certificates", "id": 1, "index": 1, "inputData": [ { "aws_certificate_arn": { "constant": false, "value": "\"iter.get(\\\\\"certificate\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "aws_certificate_arn": { "description": "ARN of the Certificate", "items": {}, "title": "Certificate ARN", "type": "array" }, "region": { "description": "Name of the AWS Region", "title": "Region", "type": "string" } }, "required": [ "aws_certificate_arn", "region" ], "title": "aws_renew_expiring_acm_certificates", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "aws_certificate_arn": "certificate", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_expiring_certificates" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": 
"Renew Expiring ACM Certificates", "orderProperties": [ "aws_certificate_arn", "region" ], "printOutput": true, "startcondition": "len(all_expiring_certificates)!=0", "tags": [ "aws_renew_expiring_acm_certificates" ], "uuid": "89773c9cb2201505fbf5dbac0cc34a4056ba1a45a315addffec9af7a4b9b7390", "version": "1.0.0" }, "outputs": [], "source": [ "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import pprint\n", "from typing import Dict, List\n", "from pydantic import BaseModel, Field\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_renew_expiring_acm_certificates_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def aws_renew_expiring_acm_certificates(handle, aws_certificate_arn: List, region: str='') -> Dict:\n", " \"\"\"aws_renew_expiring_acm_certificates returns all the ACM issued certificates\n", " which are about to expire given a threshold number of days\n", "\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :type aws_certificate_arn: List\n", " :param aws_certificate_arn: ARN of the Certificate\n", "\n", " :type region: str\n", " :param region: Region name of the AWS account\n", "\n", " :rtype: Result Dictionary of result\n", " \"\"\"\n", " result = {}\n", " try:\n", " acmClient = handle.client('acm', region_name=region)\n", " for arn in aws_certificate_arn:\n", " acmClient.renew_certificate(CertificateArn=arn)\n", " result[arn] = \"Successfully renewed\"\n", " except Exception as e:\n", " result[\"error\"] = e\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"aws_certificate_arn\": \"iter.get(\\\\\"certificate\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": 
\"all_expiring_certificates\",\n", " \"iter_parameter\": [\"aws_certificate_arn\",\"region\"]\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_expiring_certificates)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_renew_expiring_acm_certificates, lego_printer=aws_renew_expiring_acm_certificates_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "14ce7477-5f71-4127-8477-43b76473590b", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action captures the following output:
\n", "None
In this Runbook, we demonstrated the use of unSkript's AWS actions to list all expiring ACM SSL Certificates and subsequently renewed them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Renew AWS SSL Certificates that are close to expiration", "parameters": [ "region", "threshold_days" ] }, "kernelspec": { "display_name": "unSkript (Build: 1169)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "certificate_arns": { "description": "List of AWS ACM Certificates", "title": "certificate_arns", "type": "array" }, "region": { "description": "AWS region which have Certificates. Eg: \"us-west-2\"", "title": "region", "type": "string" }, "threshold": { "default": 90, "description": "Threshold number of days to check if a certificate is nearing it's expiry. Eg:45", "title": "threshold", "type": "number" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Renew_SSL_Certificate.json ================================================ { "name": "Renew AWS SSL Certificates that are close to expiration", "description": "This runbook can be used to list all AWS SSL (ACM) Certificates that need to be renewed within a given threshold number of days. 
Optionally it can renew the certificate using AWS ACM service.", "uuid": "76681732b20a69913f0d9248272271bf2f4ab6459498ec6d0ab055870e0db0bb", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SECOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Restart_Unhealthy_Services_Target_Group.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "60338620-97a9-4b89-9897-f6ff0b25a8a2", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1. List Unhealthy Instances in a Target Group
2. Restart EC2 instances
Here we will fetch all the unhealthy instances in a target group.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region(Optional)
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "683e6a7b-a04c-4298-987b-0e304b994906", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_TROUBLESHOOTING", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ELB" ], "actionDescription": "List Unhealthy Instances in a target group", "actionEntryFunction": "aws_list_unhealthy_instances_in_target_group", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "7a5cf9629c56eb979a01977330c3d2df656e965a78323be4fa49fdc3b527c9d7" ], "actionNextHopParameterMapping": { "region": ".[].region" }, "actionNouns": [ "unhealthy", "instances", "target", "group", "aws" ], "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS List Unhealthy Instances in a Target Group", "actionType": "LEGO_TYPE_AWS", "actionVerbs": [ "list" ], "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "6f7558325461f2ef5ec668dbf6356f199b20b606eba684e74764e1a16e46cd0d", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "List Unhealthy Instances in a target group", "execution_data": { "last_date_success_run_cell": "2023-05-18T14:17:21.320Z" }, "id": 13, "index": 13, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "Name of the AWS Region", "title": "Region", "type": "string" } }, "required": [], "title": "aws_list_unhealthy_instances_in_target_group", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS List Unhealthy Instances in a Target Group", "orderProperties": [ "region" ], "outputParams": { "output_name": "unhealthy_instances", "output_name_enabled": true, 
"output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not instance_ids", "tags": [], "uuid": "6f7558325461f2ef5ec668dbf6356f199b20b606eba684e74764e1a16e46cd0d", "version": "1.0.0" }, "outputs": [], "source": [ "import pprint\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from unskript.legos.utils import parseARN\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_list_unhealthy_instances_in_target_group_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def get_all_target_groups(handle, r):\n", " target_arns_list = []\n", " elbv2Client = handle.client('elbv2', region_name=r)\n", " try:\n", " tbs = aws_get_paginator(elbv2Client, \"describe_target_groups\", \"TargetGroups\")\n", " for index, tb in enumerate(tbs):\n", " target_arns_list.append(tb.get('TargetGroupArn'))\n", " except Exception:\n", " pass\n", " return target_arns_list\n", "\n", "@beartype\n", "def aws_list_unhealthy_instances_in_target_group(handle, region: str=None) -> Tuple:\n", " result = []\n", " unhealthy_instances_list = []\n", " all_target_groups = []\n", " unhealhthy_instances_dict ={}\n", " all_regions = [region]\n", " if region is None or len(region)==0:\n", " all_regions = aws_list_all_regions(handle=handle)\n", " for r in all_regions:\n", " try:\n", " output = get_all_target_groups(handle,r)\n", " if len(output)!=0:\n", " all_target_groups.append(output)\n", " except Exception as e:\n", " print(\"????????\")\n", " pass\n", " for target_group in all_target_groups:\n", " for o in target_group:\n", " parsedArn = parseARN(o)\n", " region_name = parsedArn['region']\n", " elbv2Client = handle.client('elbv2', region_name=region_name)\n", " try:\n", " targetHealthResponse = 
elbv2Client.describe_target_health(TargetGroupArn=o)\n", " except Exception as e:\n", " raise e\n", " for ins in targetHealthResponse[\"TargetHealthDescriptions\"]:\n", " if ins['TargetHealth']['State'] in ['unhealthy']:\n", " unhealthy_instances_list.append(ins['Target']['Id'])\n", " if len(unhealthy_instances_list)!=0:\n", " unhealhthy_instances_dict['instance'] = unhealthy_instances_list\n", " unhealhthy_instances_dict['region'] = region_name\n", " result.append(unhealhthy_instances_dict)\n", " if len(result)!=0:\n", " return (False,result)\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not instance_ids\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"unhealthy_instances\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_list_unhealthy_instances_in_target_group, lego_printer=aws_list_unhealthy_instances_in_target_group_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "0e938725-b388-4c57-87b1-fd2e4719f0e1", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "This action captures the following output:
\n", "unhealthy_instances
\n", "" ] }, { "cell_type": "code", "execution_count": 28, "id": "ad13b804-ad7f-433e-8910-d01d679a262a", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-18T16:16:12.444Z" }, "jupyter": { "source_hidden": true }, "name": "Create List of unhealthy instances", "orderProperties": [], "tags": [], "title": "Create List of unhealthy instances" }, "outputs": [], "source": [ "all_unhealthy_instances = []\n", "try:\n", " if unhealthy_instances[0] == False:\n", " for each_instance in unhealthy_instances[1]:\n", " all_unhealthy_instances.append(each_instance)\n", "except Exception as e:\n", " if instance_ids:\n", " for instance in instance_ids:\n", " instance_dict = {}\n", " instance_dict[\"instance\"] = instance\n", " instance_dict[\"region\"] = region\n", " all_unhealthy_instances.append(instance_dict)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "464a91c4-371f-426e-a6d6-32c2266d42e4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_unhealthy_instances
Here we will restart all the unhealthy EC2 instances.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region, instance_ids
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "23059dc8-f854-4301-a557-c62683a0d045", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "e7d021a8e955291cf31e811e64a86baa2a902ea2185cb76e7121ebbab261c320", "checkEnabled": false, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Restart AWS EC2 Instances", "id": 250, "index": 250, "inputData": [ { "instance_ids": { "constant": false, "value": "\"iter.get(\\\\\"instance\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "instance_ids": { "description": "List of instance IDs. For eg. 
[\"i-foo\", \"i-bar\"]", "items": { "type": "string" }, "title": "Instance IDs", "type": "array" }, "region": { "description": "AWS Region of the instances.", "title": "Region", "type": "string" } }, "required": [ "instance_ids", "region" ], "title": "aws_restart_ec2_instances", "type": "object" } ], "isUnskript": false, "iterData": [ { "iter_enabled": true, "iter_item": { "instance_ids": "instance", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_unhealthy_instances" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Restart AWS EC2 Instances", "nouns": [], "orderProperties": [ "instance_ids", "region" ], "output": { "type": "" }, "printOutput": true, "tags": [], "verbs": [] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import List, Dict\n", "from pydantic import BaseModel, Field\n", "import pprint\n", "from beartype import beartype\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_restart_ec2_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_restart_ec2_instances(handle, instance_ids: List, region: str) -> Dict:\n", " \"\"\"aws_restart_instances Restarts instances.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) 
method.\n", "\n", " :type instance_ids: list\n", " :param instance_ids: List of instance ids.\n", "\n", " :type region: string\n", " :param region: Region for instance.\n", "\n", " :rtype: Dict with the restarted instances info.\n", " \"\"\"\n", "\n", " ec2Client = handle.client('ec2', region_name=region)\n", " res = ec2Client.reboot_instances(InstanceIds=instance_ids)\n", " return res\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"instance_ids\": \"iter.get(\\\\\"instance\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_unhealthy_instances\",\n", " \"iter_parameter\": [\"instance_ids\",\"region\"]\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_restart_ec2_instances, lego_printer=aws_restart_ec2_instances_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "ae582460-4ae2-4d66-8328-1fb1deb238c3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action captures the following output:
\n", "None
In this Runbook, we were able to restart all unhealthy EC2 instances in a target group using unSkript's AWS actions. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "AWS Restart unhealthy services in a Target Group", "parameters": [ "region" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "instance_ids": { "description": "List of AWS EC2 instance.", "title": "instance_ids", "type": "array" }, "region": { "description": "AWS region(s) to get the target groups from. Eg: us-west-2", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Restart_Unhealthy_Services_Target_Group.json ================================================ { "name": "AWS Restart unhealthy services in a Target Group", "description": "This runbook restarts unhealthy services in a target group. 
The restart command is provided via a tag attached to the instance.", "uuid": "7a5cf9629c56eb979a01977330c3d2df656e965a78323be4fa49fdc3b527c9d7", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_TROUBLESHOOTING" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Restrict_S3_Buckets_with_READ_WRITE_Permissions.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "c92fbc7c-b9b3-4fd9-8f55-9811f3580311", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Filter Public S3 buckets with ACL Permissions
2) Change the permissions to private
This action will fetch all public S3 buckets with the chosen permissions - \"READ\",\"READ_ACP\",\"WRITE\",\"WRITE_ACP\", and \"FULL_CONTROL\". If no permissions are given, the action will execute for READ and WRITE.
\n", "\n", "\n", "This action takes the following parameters:
\n", "bucket_permission(Optional),region(Optional)
\n", "" ] }, { "cell_type": "code", "execution_count": 4, "id": "6b5c887b-254a-4790-9eaf-9e320615bd75", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_S3" ], "actionDescription": "Get AWS public S3 Buckets using ACL", "actionEntryFunction": "aws_filter_public_s3_buckets_by_acl", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "305fe6a6f0512eb2d91b71c508b3a192e5b7021bf8196f4deeec5397f2b85e84" ], "actionNextHopParameterMapping": {}, "actionNouns": [ "aws", "s3", "public", "buckets", "by", "acl" ], "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Get AWS public S3 Buckets using ACL", "actionType": "LEGO_TYPE_AWS", "actionVerbs": [ "filter" ], "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "b13d82d445e9064eb3cb88ca6247696ee3e7bfceb02b617833992f8552bf48fb", "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Get AWS public S3 Buckets using ACL", "execution_data": { "last_date_success_run_cell": "2023-05-19T07:05:35.678Z" }, "id": 4, "index": 4, "inputData": [ { "permission": { "constant": true, "value": "bucket_permission" }, "region": { "constant": false, "value": "\"us-west-2\"" } } ], "inputschema": [ { "definitions": { "BucketACLPermissions": { "description": "An enumeration.", "enum": [ "READ", "WRITE", "READ_ACP", "WRITE_ACP", "FULL_CONTROL" ], "title": "BucketACLPermissions", "type": "string" } }, "properties": { "permission": { "allOf": [ { "$ref": "#/definitions/BucketACLPermissions" } ], "default": "READ", "description": "Set of permissions that AWS S3 supports in an ACL for buckets and objects", "title": "S3 Bucket's ACL Permission", "type": "enum" }, "region": { "default": "", "description": "Name of the AWS Region", "title": "Region", "type": "string" } }, 
"required": [], "title": "aws_filter_public_s3_buckets_by_acl", "type": "object" } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Get AWS public S3 Buckets using ACL", "orderProperties": [ "region", "permission" ], "outputParams": { "output_name": "public_buckets", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not bucket_names", "tags": [ "aws_filter_public_s3_buckets_by_acl" ], "uuid": "b13d82d445e9064eb3cb88ca6247696ee3e7bfceb02b617833992f8552bf48fb", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List, Optional, Tuple\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from unskript.legos.aws.aws_get_s3_buckets.aws_get_s3_buckets import aws_get_s3_buckets\n", "from unskript.enums.aws_acl_permissions_enums import BucketACLPermissions\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_public_s3_buckets_by_acl_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def check_publicly_accessible_buckets(s3Client,b,all_permissions):\n", " public_check = [\"http://acs.amazonaws.com/groups/global/AuthenticatedUsers\",\n", " \"http://acs.amazonaws.com/groups/global/AllUsers\"]\n", " public_buckets = False\n", " try:\n", " res = s3Client.get_bucket_acl(Bucket=b)\n", " for perm in all_permissions:\n", " for grant in res[\"Grants\"]:\n", " if 'Permission' in grant.keys() and perm == grant[\"Permission\"]:\n", " if 'URI' in grant[\"Grantee\"] and grant[\"Grantee\"][\"URI\"] in public_check:\n", " public_buckets = True\n", " except Exception as e:\n", " pass\n", " return public_buckets\n", "\n", "@beartype\n", "def 
aws_filter_public_s3_buckets_by_acl(handle, permission:BucketACLPermissions=BucketACLPermissions.READ, region: str=None) -> Tuple:\n", " \"\"\"aws_filter_public_s3_buckets_by_acl get list of public buckets.\n", "\n", " Note- By default(if no permissions are given) READ and WRITE ACL Permissioned S3 buckets are checked for public access. Other ACL Permissions are - \"READ_ACP\"|\"WRITE_ACP\"|\"FULL_CONTROL\"\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...)\n", "\n", " :type permission: Enum\n", " :param permission: Set of permissions that AWS S3 supports in an ACL for buckets and objects.\n", "\n", " :type region: string\n", " :param region: location of the bucket.\n", "\n", " :rtype: Object with status, list of public S3 buckets with READ/WRITE ACL Permissions, and errors\n", " \"\"\"\n", " all_permissions = [permission]\n", " if permission is None or len(permission)==0:\n", " all_permissions = [\"READ\",\"WRITE\"]\n", " result = []\n", " all_buckets = []\n", " all_regions = [region]\n", " if region is None or len(region)==0:\n", " all_regions = aws_list_all_regions(handle=handle)\n", " try:\n", " for r in all_regions:\n", " s3Client = handle.client('s3',region_name=r)\n", " output = aws_get_s3_buckets(handle=handle, region=r)\n", " if len(output)!= 0:\n", " for o in output:\n", " all_buckets_dict = {}\n", " all_buckets_dict[\"region\"]=r\n", " all_buckets_dict[\"bucket\"]=o\n", " all_buckets.append(all_buckets_dict)\n", " except Exception as e:\n", " raise e\n", "\n", " for bucket in all_buckets:\n", " s3Client = handle.client('s3',region_name= bucket['region'])\n", " flag = check_publicly_accessible_buckets(s3Client,bucket['bucket'], all_permissions)\n", " if flag:\n", " result.append(bucket)\n", " if len(result)!=0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"permission\": \"bucket_permission\",\n", " \"region\": 
\"\\\\\"us-west-2\\\\\"\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not bucket_names\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"public_buckets\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_public_s3_buckets_by_acl, lego_printer=aws_filter_public_s3_buckets_by_acl_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "24c71589-028b-4d3b-908f-ce867b462f7a", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "public_buckets
This action filters out regions that have no public buckets and creates a list of the public buckets that are to be made private.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": 25, "id": "fa0655b5-e142-445c-9a39-312b4ee9f3f6", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-02T15:56:00.421Z" }, "jupyter": { "source_hidden": true }, "name": "Create List of public S3 buckets", "orderProperties": [], "tags": [], "title": "Create List of public S3 buckets" }, "outputs": [], "source": [ "all_public_buckets = []\n", "try:\n", " if public_buckets[0] == False:\n", " if len(public_buckets[1])!=0:\n", " all_public_buckets=public_buckets[1]\n", "except Exception:\n", " for buck in bucket_names:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"bucket\"] = buck\n", " all_public_buckets.append(data_dict)\n", "print(all_public_buckets)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "b49c03e9-2951-4fab-b5f5-5338b8a955f9", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_public_buckets
Using unSkript's AWS Change ACL Permission of public S3 Bucket action, we will change the permissions of the bucket to private, public-read, public-read-write, or authenticated-read. If no canned_acl_permission is selected, private will be set by default.
\n", "\n", "\n", "This action takes the following parameters:
\n", "bucket_name,region,canned_acl_permission
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "a0896792-2764-4e3e-ab44-82f234e1c5f7", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_S3" ], "actionDescription": "AWS Change ACL Permission public S3 Bucket", "actionEntryFunction": "aws_change_acl_permissions_of_buckets", "actionIsCheck": false, "actionIsRemediation": true, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Change ACL Permission of public S3 Bucket", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "305fe6a6f0512eb2d91b71c508b3a192e5b7021bf8196f4deeec5397f2b85e84", "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "AWS Change ACL Permission public S3 Bucket", "id": 3, "index": 3, "inputData": [ { "acl": { "constant": true, "value": "acl_permission" }, "bucket_name": { "constant": false, "value": "\"iter.get(\\\\\"bucket_name\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "definitions": { "CannedACLPermissions": { "description": "An enumeration.", "enum": [ "Private", "PublicRead", "PublicReadWrite", "AuthenticatedRead" ], "title": "CannedACLPermissions", "type": "string" } }, "properties": { "acl": { "allOf": [ { "$ref": "#/definitions/CannedACLPermissions" } ], "description": "Canned ACL Permission type - 'private'|'public-read'|'public-read-write'|'authenticated-read'.", "title": "Canned ACL Permission", "type": "enum" }, "bucket_name": { "description": "AWS S3 Bucket Name.", "title": "Bucket Name", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [ "region", "bucket_name" 
], "title": "aws_change_acl_permissions_of_buckets", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "bucket_name": "bucket_name", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_public_buckets" } } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Change ACL Permission of public S3 Bucket", "orderProperties": [ "region", "bucket_name", "acl" ], "printOutput": true, "startcondition": "len(all_public_buckets)!=0", "tags": [ "aws_change_acl_permissions_of_buckets" ], "uuid": "305fe6a6f0512eb2d91b71c508b3a192e5b7021bf8196f4deeec5397f2b85e84", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import pprint\n", "from typing import Optional, Dict\n", "from pydantic import BaseModel, Field\n", "from unskript.enums.aws_canned_acl_enums import CannedACLPermissions\n", "\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_change_acl_permissions_of_buckets_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_change_acl_permissions_of_buckets(\n", " handle,\n", " bucket_name: str,\n", " acl: CannedACLPermissions=CannedACLPermissions.Private,\n", " region: str = None\n", " ) -> Dict:\n", " \"\"\" aws_put_bucket_acl get Dict of buckets ACL change info.\n", "\n", " :type handle: Session\n", " :param handle: Object returned by the task.validate(...) 
method\n", "\n", " :type bucket_name: string\n", " :param bucket_name: S3 bucket name where to set ACL on.\n", "\n", " :type acl: CannedACLPermissions\n", " :param acl: Canned ACL Permission type - 'private'|'public-read'|'public-read-write\n", " '|'authenticated-read'.\n", "\n", " :type region: string\n", " :param region: location of the bucket.\n", "\n", " :rtype: Dict of buckets ACL change info\n", " \"\"\"\n", " # connect to the S3 using client\n", " all_permissions = acl\n", " if acl is None or len(acl)==0:\n", " all_permissions = \"private\"\n", " s3Client = handle.client('s3',\n", " region_name=region)\n", "\n", " # Put bucket ACL for the permissions grant\n", " response = s3Client.put_bucket_acl(\n", " Bucket=bucket_name,\n", " ACL=all_permissions )\n", "\n", " return response\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"\\\\\"iter.get(\\\\\\\\\\\\\"region\\\\\\\\\\\\\")\\\\\"\",\n", " \"bucket_name\": \"\\\\\"iter.get(\\\\\\\\\\\\\"bucket_name\\\\\\\\\\\\\")\\\\\"\",\n", " \"acl\": \"acl_permission\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_public_buckets\",\n", " \"iter_parameter\": [\"region\",\"bucket_name\"]\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_public_buckets)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_change_acl_permissions_of_buckets, lego_printer=aws_change_acl_permissions_of_buckets_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "eada3017-32cf-46e2-b02c-4eb60256a3a9", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], 
"title": "Conclusion" }, "source": [ "This action captures the following output:
\n", "None
In this Runbook, we were able to restrict S3 buckets that had public READ and WRITE ACL permissions to private. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Restrict S3 Buckets with READ/WRITE Permissions to all Authenticated Users", "parameters": null }, "kernelspec": { "display_name": "unSkript (Build: 1169)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "acl_permission": { "default": "private", "description": "Canned ACL Permission type - Eg: 'private'|'public-read'|'public-read-write'|'authenticated-read'", "enum": [ "private", "public-read", "public-read-write" ], "enumNames": [ "private", "public-read", "public-read-write" ], "title": "acl_permission", "type": "string" }, "bucket_names": { "description": "List of S3 bucket names.", "title": "bucket_names", "type": "array" }, "bucket_permission": { "default": "READ", "description": "Set of permissions that AWS S3 supports in an ACL for buckets and objects. Eg:\"READ\",\"WRITE_ACP\",\"FULL_CONTROL\"", "enum": [ "READ", "WRITE", "READ_ACP" ], "enumNames": [ "READ", "WRITE", "READ_ACP" ], "title": "bucket_permission", "type": "string" }, "region": { "description": "AWS Region to get the buckets from. 
Eg:us-west-2\"", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Restrict_S3_Buckets_with_READ_WRITE_Permissions.json ================================================ { "name": "Restrict S3 Buckets with READ/WRITE Permissions to all Authenticated Users", "description": "This runbook will list all the S3 buckets.Filter buckets which has ACL public READ/WRITE permissions and Change the ACL Public READ/WRITE permissions to private in the given region.", "uuid": "750987144b20d7b5984a37e58c2e17b69fd33f799a1f027f0ff7532cee5913c6", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Secure_Publicly_Accessible_RDS_Instances.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "c0e8284f-f6a8-4b7f-971c-8fb037002354", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "Using unSkript's AWS Get Publicly Accessible RDS Instances action, we will get all the publicly accessible instances from RDS instances.
\n", "\n", "\n", "Input parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "7f6a6416-23f4-42d0-8d3c-dad850450f9e", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_RDS" ], "actionDescription": "AWS Get Publicly Accessible RDS Instances", "actionEntryFunction": "aws_get_publicly_accessible_db_instances", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "dda26fd556dd6b59e2fac9c9ed6e81fc19e5374746049d494237bcdc6a17fae4" ], "actionNextHopParameterMapping": { "dda26fd556dd6b59e2fac9c9ed6e81fc19e5374746049d494237bcdc6a17fae4": { "name": "Publicly Accessible Amazon RDS Instances", "region": ".[0].region" } }, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Get Publicly Accessible RDS Instances", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "97bfc082be1cffdf5c795b3119bfa90b36946934b37cf213d762e0ee3ee881f8", "condition_enabled": true, "credentialsJson": {}, "description": "AWS Get Publicly Accessible RDS Instances", "execution_count": {}, "execution_data": {}, "id": 3, "index": 3, "inputschema": [ { "properties": { "region": { "default": "", "description": "Region of the RDS.", "title": "Region for RDS", "type": "string" } }, "title": "aws_get_publicly_accessible_db_instances", "type": "object" } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Publicly Accessible RDS Instances", "orderProperties": [ "region" ], "outputParams": { "output_name": "public_rds_instances", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not rds_instances", "tags": [ "aws_get_publicly_accessible_db_instances" ], "uuid": 
"97bfc082be1cffdf5c795b3119bfa90b36946934b37cf213d762e0ee3ee881f8", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "import pprint\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from unskript.legos.utils import CheckOutput\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from unskript.connectors.aws import aws_get_paginator\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_publicly_accessible_db_instances_printer(output):\n", " if output is None:\n", " return\n", "\n", " if isinstance(output, CheckOutput):\n", " print(output.json())\n", " else:\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_publicly_accessible_db_instances(handle, region: str = \"\") -> Tuple:\n", " \"\"\"aws_get_publicly_accessible_db_instances Gets all publicly accessible DB instances\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: Region of the RDS.\n", "\n", " :rtype: CheckOutput with status result and list of publicly accessible RDS instances.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " ec2Client = handle.client('rds', region_name=reg)\n", " response = aws_get_paginator(ec2Client, \"describe_db_instances\", \"DBInstances\")\n", " for db in response:\n", " db_instance_dict = {}\n", " if db['PubliclyAccessible']:\n", " db_instance_dict[\"region\"] = reg\n", " db_instance_dict[\"instance\"] = db['DBInstanceIdentifier']\n", " result.append(db_instance_dict)\n", " except Exception:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", 
"task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not rds_instances\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(credentialsJson='''{\\\"credential_type\\\": \\\"CONNECTOR_TYPE_AWS\\\"}''')\n", "\n", "task.configure(outputName=\"public_rds_instances\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_publicly_accessible_db_instances, lego_printer=aws_get_publicly_accessible_db_instances_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "d56e5ae8-9277-4615-a3a9-dda4f55955bf", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 1A", "orderProperties": [], "tags": [], "title": "Step 1A" }, "source": [ "Output variable:
\n", "rds_instances
In this action, we modify the output from step 1 and return a list of dictionary items for the publicly accessible RDS instances.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "be5f0054-e0f8-40e7-b797-993033a3fe04", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-08-16T09:07:53.288Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Public RDS Instances", "orderProperties": [], "tags": [], "title": "Create List of Public RDS Instances" }, "outputs": [], "source": [ "all_public_instances = []\n", "try:\n", " if public_rds_instances[0] == False:\n", " for instance in public_rds_instances[1]:\n", " all_public_instances.append(instance)\n", "except Exception as e:\n", " if rds_instances:\n", " for ins in rds_instances:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"instance\"] = ins\n", " all_public_instances.append(data_dict)\n", " else:\n", " raise Exception(e)\n", "print(all_public_instances)" ] }, { "cell_type": "markdown", "id": "a518b936-dc13-4a56-962d-1595f7c74b71", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "Output variable: all_public_instances
\n", "
Using unSkript's Modify Publicly Accessible RDS Instances action we will modify the access to all the publicly accessible instances from the public to private.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "1b4ad0cc-6140-4f6f-a06e-c894b583cb99", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_RDS" ], "actionDescription": "Change public accessibility of RDS Instances to False.", "actionEntryFunction": "aws_make_rds_instance_not_publicly_accessible", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_STR", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Disallow AWS RDS Instance public accessibility", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "15d2e1417496ecb13e7bb88d7429f74dabbb6f8b9bc7d9df275647eae402e4dd", "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Change public accessibility of RDS Instances to False.", "execution_count": {}, "execution_data": {}, "id": 7, "index": 7, "inputData": [ { "db_instance_identifier": { "constant": false, "value": "\"iter.get(\\\\\"instance\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "db_instance_identifier": { "description": "The DB instance identifier for the DB instance to be deleted. 
This parameter isn’t case-sensitive.", "title": "RDS Instance Identifier", "type": "string" }, "region": { "description": "AWS region of instance identifier", "title": "AWS Region", "type": "string" } }, "required": [ "db_instance_identifier", "region" ], "title": "aws_make_rds_instance_not_publicly_accessible", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "db_instance_identifier": "instance", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_public_instances" } } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Disallow AWS RDS Instance public accessibility", "orderProperties": [ "db_instance_identifier", "region" ], "printOutput": true, "startcondition": "len(all_public_instances)!=0", "tags": [ "aws_make_rds_instance_not_publicly_accessible" ], "uuid": "15d2e1417496ecb13e7bb88d7429f74dabbb6f8b9bc7d9df275647eae402e4dd", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2023 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_make_rds_instance_not_publicly_accessible_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def aws_make_rds_instance_not_publicly_accessible(handle, db_instance_identifier: str, region: str) -> str:\n", " \"\"\"\n", " aws_make_rds_instance_not_publicly_accessible makes the specified RDS instance not publicly accessible.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type db_instance_identifier: string\n", " :param db_instance_identifier: Identifier of the RDS instance.\n", "\n", " :type region: string\n", " :param region: Region of the RDS instance.\n", "\n", " :rtype: Response of the operation.\n", " \"\"\"\n", " try:\n", " rdsClient = handle.client('rds', 
region_name=region)\n", " rdsClient.modify_db_instance(\n", " DBInstanceIdentifier=db_instance_identifier,\n", " PubliclyAccessible=False\n", " )\n", " except Exception as e:\n", " raise e\n", " return f\"Public accessiblilty is being changed to False...\"\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(credentialsJson='''{\\\"credential_type\\\": \\\"CONNECTOR_TYPE_AWS\\\"}''')\n", "task.configure(inputParamsJson='''{\n", " \"db_instance_identifier\": \"iter.get(\\\\\"instance\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_public_instances\",\n", " \"iter_parameter\": [\"db_instance_identifier\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_public_instances)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_make_rds_instance_not_publicly_accessible, lego_printer=aws_make_rds_instance_not_publicly_accessible_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "276822d0-0d5d-4023-83c1-3f8b12e50568", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "region,db_instance_identifier
In this Runbook, we demonstrated the use of unSkript's AWS actions. This runbook helps to find publicly accessible RDS instances and change them to private. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "parameters": [ "channel", "region" ], "runbook_name": "Secure Publicly Accessible Amazon RDS Instances" }, "kernelspec": { "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "rds_instances": { "description": "List of RDS instance DB Identifiers(names).", "title": "rds_instances", "type": "array" }, "region": { "description": "RDS instance region", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Secure_Publicly_Accessible_RDS_Instances.json ================================================ { "name": "Secure Publicly Accessible Amazon RDS Instances", "description": "This runbook can be used to find the publicly accessible RDS instances for the given AWS region and change them to private.", "uuid": "dda26fd556dd6b59e2fac9c9ed6e81fc19e5374746049d494237bcdc6a17fae4", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS","CATEGORY_TYPE_SECOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Secure_Publicly_accessible_Amazon_RDS_Snapshot.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "9bdb4ffc-b726-49e9-95b8-063371b3fa61", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1. Get publicly accessible DB snapshots
2. Change the public access to private
Using unSkript's Get Publicly Accessible DB Snapshots in RDS action we will fetch all the publicly accessible snapshots from the list of manual DB snapshots.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region(Optional)
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "6625eaae-2435-4542-a589-8456221c7e88", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_RDS" ], "actionDescription": "AWS Get Publicly Accessible DB Snapshots in RDS", "actionEntryFunction": "aws_get_publicly_accessible_db_snapshots", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "7c0d143556a33b81d3fb1ff08dfdd59cebe5d58b00b55e8ae660df2e42f71bfe" ], "actionNextHopParameterMapping": {}, "actionNouns": [ "aws", "database", "snapshots", "public", "accessible" ], "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Get Publicly Accessible DB Snapshots in RDS", "actionType": "LEGO_TYPE_AWS", "actionVerbs": [ "get" ], "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "e665224418391a4deafae48140c5b83c8af7b881dd281acbd79ed9ceb52aad4f", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "AWS Get Publicly Accessible DB Snapshots in RDS", "execution_data": { "last_date_success_run_cell": "2023-05-17T17:53:59.863Z" }, "id": 5, "index": 5, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "Region of the RDS", "title": "Region", "type": "string" } }, "required": [], "title": "aws_get_publicly_accessible_db_snapshots", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Publicly Accessible DB Snapshots in RDS", "orderProperties": [ "region" ], "outputParams": { "output_name": "publicly_accessible_snapshots", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, 
"printOutput": true, "startcondition": "not public_snapshot_ids", "tags": [], "uuid": "e665224418391a4deafae48140c5b83c8af7b881dd281acbd79ed9ceb52aad4f", "version": "1.0.0" }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "import pprint\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from unskript.legos.utils import CheckOutput\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from unskript.legos.aws.aws_filter_all_manual_database_snapshots.aws_filter_all_manual_database_snapshots import aws_filter_all_manual_database_snapshots\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_publicly_accessible_db_snapshots_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_publicly_accessible_db_snapshots(handle, region: str=None) -> Tuple:\n", " \"\"\"aws_get_publicly_accessible_db_snapshots lists of publicly accessible\n", " db_snapshot_identifier.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: Region of the RDS.\n", "\n", " :rtype: Object with status, result having publicly accessible Snapshots\n", " Identifier in RDS, error\n", " \"\"\"\n", " manual_snapshots_list = []\n", " result = []\n", " all_regions = [region]\n", " if region is None or not region:\n", " all_regions = aws_list_all_regions(handle=handle)\n", " try:\n", " for r in all_regions:\n", " snapshots_dict = {}\n", " output = aws_filter_all_manual_database_snapshots(handle=handle, region=r)\n", " snapshots_dict[\"region\"] = r\n", " snapshots_dict[\"snapshot\"] = output\n", " manual_snapshots_list.append(snapshots_dict)\n", " except Exception as e:\n", " raise e\n", "\n", " for all_snapshots in manual_snapshots_list:\n", " try:\n", " ec2Client = 
handle.client('rds', region_name=all_snapshots['region'])\n", " for each_snapshot in all_snapshots['snapshot']:\n", " response = ec2Client.describe_db_snapshot_attributes(\n", " DBSnapshotIdentifier=each_snapshot\n", " )\n", " db_attribute = response[\"DBSnapshotAttributesResult\"]\n", " for value in db_attribute['DBSnapshotAttributes']:\n", " p_dict={}\n", " if \"all\" in value[\"AttributeValues\"]:\n", " p_dict[\"region\"] = all_snapshots['region']\n", " p_dict[\"open_snapshot\"] = db_attribute['DBSnapshotIdentifier']\n", " result.append(p_dict)\n", " except Exception:\n", " pass\n", " if len(result)!=0:\n", " return (False, result)\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not public_snapshot_ids\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"publicly_accessible_snapshots\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_publicly_accessible_db_snapshots, lego_printer=aws_get_publicly_accessible_db_snapshots_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "0867d634-3d7c-473e-b5fe-06f042452c63", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "all_snapshots
This action filters out regions that have no manual DB snapshots and creates a list of those that do.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": 5, "id": "87b57cf2-3eeb-45e6-9eb5-e7106692ea61", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-02T16:13:22.904Z" }, "name": "Create List of Public DB Snapshots", "orderProperties": [], "tags": [], "title": "Create List of Public DB Snapshots" }, "outputs": [], "source": [ "all_public_snapshots = []\n", "try:\n", " if publicly_accessible_snapshots[0] == False:\n", " for snapshot in publicly_accessible_snapshots[1]:\n", " all_public_snapshots.append(snapshot)\n", "except Exception as e:\n", " if public_snapshot_ids:\n", " for snap in public_snapshot_ids:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"open_snapshot\"] = snap\n", " all_public_snapshots.append(data_dict)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "93579475-9902-4be4-b9de-fd6fadbc2710", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_public_snapshots
Using unSkript's Modify Publicly Accessible RDS Snapshots action we will modify the access to all the publicly accessible snapshots from the public to private.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 6, "id": "2e58c74d-fd35-429c-b787-0be39f56d0b5", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "1a325ba527fbf504000b5d5961f4ef6366daed4a50951e657bfff87eedad52df", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "AWS Modify Publicly Accessible RDS Snapshots", "execution_data": { "last_date_success_run_cell": "2023-01-30T18:04:38.167Z" }, "id": 239, "index": 239, "inputData": [ { "db_snapshot_identifier": { "constant": false, "value": "\"iter.get(\\\\\"open_snapshot\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "db_snapshot_identifier": { "description": "DB Snapshot Idntifier of RDS.", "title": "DB Snapshot Idntifier", "type": "string" }, "region": { "description": "Region of the RDS.", "title": "Region", "type": "string" } }, "required": [ "db_snapshot_identifier", "region" ], "title": "aws_modify_public_db_snapshots", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "db_snapshot_identifier": "open_snapshot", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_public_snapshots" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Modify Publicly Accessible RDS Snapshots", "nouns": [], "orderProperties": [ "db_snapshot_identifier", "region" ], "output": { "type": "" }, "printOutput": true, "startcondition": "len(all_public_snapshots) != 0", "tags": [ "aws_modify_public_db_snapshots" ], "verbs": [] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import 
List, Dict\n", "from pydantic import BaseModel, Field\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_modify_public_db_snapshots_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_modify_public_db_snapshots(handle, db_snapshot_identifier: str, region: str) -> List:\n", " \"\"\"aws_modify_public_db_snapshots lists of publicly accessible DB Snapshot Idntifier Info.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type db_snapshot_identifier: string\n", " :param db_snapshot_identifier: DB Snapshot Idntifier of RDS.\n", "\n", " :type region: string\n", " :param region: Region of the RDS.\n", "\n", " :rtype: List with Dict of DB Snapshot Idntifier Info.\n", " \"\"\"\n", "\n", "\n", " ec2Client = handle.client('rds', region_name=region)\n", " result = []\n", " try:\n", " response = ec2Client.modify_db_snapshot_attribute(\n", " DBSnapshotIdentifier=db_snapshot_identifier,\n", " AttributeName='restore',\n", " ValuesToRemove=['all'])\n", "\n", " result.append(response)\n", "\n", " except Exception as error:\n", " result.append(error)\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"db_snapshot_identifier\": \"iter.get(\\\\\"open_snapshot\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_public_snapshots\",\n", " \"iter_parameter\": [\"region\",\"db_snapshot_identifier\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_public_snapshots) != 0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", 
"if err is None:\n", " task.execute(aws_modify_public_db_snapshots, lego_printer=aws_modify_public_db_snapshots_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "faee311b-d041-46f6-8734-396ccba4e664", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "region,db_snapshot_identifier
In this Runbook, we were able to secure all the publicly accessible AWS RDS DB Snapshots by using unSkript's AWS actions. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Secure Publicly accessible Amazon RDS Snapshot", "parameters": [ "region" ] }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "public_snapshot_ids": { "description": "List of publicly accessible snapshot ids.", "title": "public_snapshot_ids", "type": "array" }, "region": { "description": "AWS Region(s) to get publicly accessible RDS Db Snapshots. Eg: us-west-2.", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Secure_Publicly_accessible_Amazon_RDS_Snapshot.json ================================================ { "name": "Secure Publicly accessible Amazon RDS Snapshot", "description": "This lego can be used to list all the manual database snapshots in the given region. 
Get publicly accessible DB snapshots in RDS and Modify the publicly accessible DB snapshots in RDS to private.", "uuid": "7c0d143556a33b81d3fb1ff08dfdd59cebe5d58b00b55e8ae660df2e42f71bfe", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS","CATEGORY_TYPE_SECOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Stop_Idle_EC2_Instances.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "0bc2da9b-06db-4411-b7a1-60bf674c3cd4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "1. AWS Find Idle Instances
2. Stop AWS Instances
Here we will use unSkript's AWS Find Idle Instances action. This action filters all idle instances based on the given region, idle_cpu_threshold and idle_duration, and returns a list of all the idle instances. It will execute only if the instance_ids parameter is not passed.
\n", "\n", "Input parameters:
\n", "region, idle_cpu_threshold, idle_duration
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "933eb1d6-32e2-4dd2-87cf-b27fbb51c2d0", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2" ], "actionDescription": "Find Idle EC2 instances", "actionEntryFunction": "aws_find_idle_instances", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "c03babff32b83949e6ca20a49901d42a5a74ed3036de4609096390c9f6d0851a" ], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Find Idle Instances", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "c25a662a49587285082c36455564eed5664cc852926fcc2cec374300492df09d", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Find Idle EC2 instances", "execution_data": { "last_date_success_run_cell": "2023-05-17T16:28:20.633Z" }, "id": 1, "index": 1, "inputData": [ { "idle_cpu_threshold": { "constant": false, "value": "int(idle_cpu_threshold)" }, "idle_duration": { "constant": false, "value": "int(idle_duration)" }, "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "idle_cpu_threshold": { "default": 5, "description": "Idle CPU threshold (in percent)", "title": "Idle CPU Threshold", "type": "integer" }, "idle_duration": { "default": 6, "description": "Idle duration (in hours)", "title": "Idle Duration", "type": "integer" }, "region": { "default": "", "description": "AWS Region to get the instances from. 
Eg: \"us-west-2\"", "title": "Region", "type": "string" } }, "required": [], "title": "aws_find_idle_instances", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Find Idle Instances", "orderProperties": [ "idle_cpu_threshold", "idle_duration", "region" ], "outputParams": { "output_name": "idle_instances", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not instance_ids", "tags": [], "uuid": "c25a662a49587285082c36455564eed5664cc852926fcc2cec374300492df09d", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2023 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from typing import Optional, Tuple\n", "import pprint\n", "import datetime\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_find_idle_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def is_instance_idle(instance_id , idle_cpu_threshold, idle_duration, cloudwatchclient):\n", " try:\n", " now = datetime.datetime.utcnow()\n", " start_time = now - datetime.timedelta(hours=idle_duration)\n", " cpu_utilization_stats = cloudwatchclient.get_metric_statistics(\n", " Namespace=\"AWS/EC2\",\n", " MetricName=\"CPUUtilization\",\n", " Dimensions=[{\"Name\": \"InstanceId\", \"Value\": instance_id}],\n", " StartTime=start_time.isoformat(),\n", " EndTime=now.isoformat(),\n", " Period=3600,\n", " Statistics=[\"Average\"],\n", " )\n", " if not cpu_utilization_stats[\"Datapoints\"]:\n", " return False\n", " average_cpu = sum([datapoint[\"Average\"] for datapoint in cpu_utilization_stats[\"Datapoints\"]]) / len(cpu_utilization_stats[\"Datapoints\"])\n", " except Exception as e:\n", 
" raise e\n", " return average_cpu < idle_cpu_threshold\n", "\n", "@beartype\n", "def aws_find_idle_instances(handle, idle_cpu_threshold:int = 5, idle_duration:int = 6, region:str='') -> Tuple:\n", " \"\"\"aws_find_idle_instances finds idle EC2 instances\n", "\n", " :type region: string\n", " :param region: AWS Region to get the instances from. Eg: \"us-west-2\"\n", "\n", " :type idle_cpu_threshold: int\n", " :param idle_cpu_threshold: (in percent) Idle CPU threshold (in percent)\n", "\n", " :type idle_duration: int\n", " :param idle_duration: (in hours) Idle CPU threshold (in hours)\n", "\n", " :rtype: Tuple with status result and list of Idle Instances.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " ec2client = handle.client('ec2', region_name=reg)\n", " cloudwatchclient = handle.client(\"cloudwatch\", region_name=reg)\n", " all_instances = ec2client.describe_instances()\n", " for instance in all_instances['Reservations']:\n", " for i in instance['Instances']:\n", " if i['State'][\"Name\"] == \"running\" and is_instance_idle(i['InstanceId'], reg, idle_cpu_threshold,idle_duration, cloudwatchclient ):\n", " idle_instances = {}\n", " idle_instances[\"instance\"] = i['InstanceId']\n", " idle_instances[\"region\"] = reg\n", " result.append(idle_instances)\n", " except Exception:\n", " pass\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"idle_cpu_threshold\": \"int(idle_cpu_threshold)\",\n", " \"idle_duration\": \"int(idle_duration)\",\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not instance_ids\",\n", " \"condition_result\": true\n", " }''')\n", 
"task.configure(outputName=\"idle_instances\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_find_idle_instances, lego_printer=aws_find_idle_instances_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "81e38fc8-6cde-4287-a728-5aa6c2caa07b", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "idle_instances
In this action, we will take the list of idle instances from Step 1 and format the output for use in Step 2.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 22, "id": "135f2a41-a19c-4477-815a-911bb8fd5620", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-17T16:33:14.480Z" }, "name": "Modify Idle Instances Output", "orderProperties": [], "tags": [], "title": "Modify Idle Instances Output" }, "outputs": [], "source": [ "idle_instances_list = []\n", "try:\n", " if idle_instances[0] == False:\n", " for instance in idle_instances[1]:\n", " idle_instances_list.append(instance)\n", "except Exception as e:\n", " if instance_ids:\n", " for instance in instance_ids:\n", " instance_dict = {}\n", " instance_dict[\"instance\"] = instance\n", " instance_dict[\"region\"] = region\n", " idle_instances_list.append(instance_dict)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "86252292-3008-4943-869e-c9b581ef4306", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable:
\n", "idle_instances_list
Here we will use unSkript Stop AWS Instances action. In this action, we will pass the list of idle instances from step 1 and stop those instances.
\n", "\n", "\n", "Input parameters:
\n", "instance_id,region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "ab0bab01-d02a-44d2-aa4f-82652a585f93", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2" ], "actionDescription": "Stop an AWS Instance", "actionEntryFunction": "aws_stop_instances", "actionIsCheck": false, "actionIsRemediation": true, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": [ "aws", "instances" ], "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Stop AWS Instances", "actionType": "LEGO_TYPE_AWS", "actionVerbs": [ "stop" ], "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "ef6e03e0bb46f1d9eb58405e5eed4b962c4ae9eeaaf64877c1c4e820c2854c6e", "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Stop an AWS Instance", "execution_data": { "last_date_success_run_cell": "2023-04-25T14:38:41.240Z" }, "id": 2, "index": 2, "inputData": [ { "instance_id": { "constant": false, "value": "\"iter.get(\\\\\"instance\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "instance_id": { "description": "ID of the instance to be stopped.", "title": "Instance Id", "type": "string" }, "region": { "description": "AWS Region of the instance.", "title": "Region", "type": "string" } }, "required": [ "instance_id", "region" ], "title": "aws_stop_instances", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "instance_id": "instance", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "idle_instances_list" } } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Stop AWS Instances", "orderProperties": [ "instance_id", "region" ], "outputParams": { 
"output_name": "stop_instances", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "len(idle_instances_list) != 0", "tags": [], "title": "Stop AWS Instances", "uuid": "ef6e03e0bb46f1d9eb58405e5eed4b962c4ae9eeaaf64877c1c4e820c2854c6e", "version": "1.0.0" }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_stop_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_stop_instances(handle, instance_id: str, region: str) -> Dict:\n", " \"\"\"aws_stop_instances Stops instances.\n", "\n", " :type instance_id: string\n", " :param instance_id: String containing the name of AWS EC2 instance\n", "\n", " :type region: string\n", " :param region: AWS region for instance\n", "\n", " :rtype: Dict with the stopped instances state info.\n", " \"\"\"\n", "\n", " ec2Client = handle.client('ec2', region_name=region)\n", " output = {}\n", " res = ec2Client.stop_instances(InstanceIds=[instance_id])\n", " for instances in res['StoppingInstances']:\n", " output[instances['InstanceId']] = instances['CurrentState']\n", "\n", " return output\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"instance_id\": \"iter.get(\\\\\"instance\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"idle_instances_list\",\n", " \"iter_parameter\": [\"instance_id\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(idle_instances_list) != 
0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"stop_instances\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_stop_instances, lego_printer=aws_stop_instances_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "086512e7-14b2-4894-bd36-0e8f63e5a8e7", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "Output variable:
\n", "stop_instances
In this Runbook, we demonstrated the use of unSkript's AWS actions to filter idle instances and stop those. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Stop Idle EC2 Instances", "parameters": null }, "kernelspec": { "display_name": "unSkript (Build: 1166)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "idle_cpu_threshold": { "default": 5, "description": "The CPU utilization threshold below which an instance is considered idle (e.g., 10).", "title": "idle_cpu_threshold", "type": "number" }, "idle_duration": { "default": 6, "description": "The duration of time (in hours) for which an instance must have CPU utilization below the threshold to be considered idle (e.g., 24 for 1 day).", "title": "idle_duration", "type": "number" }, "instance_ids": { "description": "\nList of idle instance ids.", "title": "instance_ids", "type": "array" }, "region": { "description": "AWS Region e.g. 
\"us-west-2\"", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "show_action_drag_hint_done": { "environment_id": "1499f27c-6406-4fbd-bd1b-c6f92800018f", "environment_name": "Staging", "execution_id": "", "inputs_for_searched_lego": "", "notebook_id": "d4159cb3-6c83-4ba5-a2f7-d23c0777076b.ipynb", "parameters": null, "runbook_name": "Stop Idle EC2 Instances", "search_string": "", "show_tool_tip": true, "tenant_id": "982dba5f-d9df-48ae-a5bf-ec1fc94d4882", "tenant_url": "https://tenant-staging.alpha.unskript.io", "user_email_id": "support+staging@unskript.com", "workflow_id": "f8ead207-81c0-414a-a15b-76fcdefafe8d" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Stop_Idle_EC2_Instances.json ================================================ { "name": "Stop Idle EC2 Instances", "description": "This runbook can be used to Stop all EC2 Instances that are idle using given cpu threshold and duration.", "uuid": "c03babff32b83949e6ca20a49901d42a5a74ed3036de4609096390c9f6d0851a", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_COST_OPT" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Stop_Untagged_EC2_Instances.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "cadbcf65-5c79-4496-81ef-c9e1e18ee932", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Get all untagged EC2 instance
2) Stop Untagged EC2 instances
Here we will fetch all the untagged EC2 instances.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region(Optional)
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "44455214-e204-4278-818f-47734b8194c4", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2" ], "actionDescription": "Filter AWS Untagged EC2 Instances", "actionEntryFunction": "aws_filter_untagged_ec2_instances", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "a16703da15d9e9e2d8a56b146e730b5e4c1496721ff1dc8606a5021d521ed9e3" ], "actionNextHopParameterMapping": {}, "actionNouns": [ "aws", "instances", "untagged" ], "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Filter AWS Untagged EC2 Instances", "actionType": "LEGO_TYPE_AWS", "actionVerbs": [ "filter" ], "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "0ebc91f11a150d8933a8ebf4cf8824f0ca8cd9e64383b30dd9fad4e7b9b26ac9", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Filter AWS Untagged EC2 Instances", "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "Name of the AWS Region", "title": "Region", "type": "string" } }, "required": [], "title": "aws_filter_untagged_ec2_instances", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Filter AWS Untagged EC2 Instances", "orderProperties": [ "region" ], "outputParams": { "output_name": "untagged_instances", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not instance_ids", "tags": [ "aws_filter_untagged_ec2_instances" ], "uuid": "0ebc91f11a150d8933a8ebf4cf8824f0ca8cd9e64383b30dd9fad4e7b9b26ac9", 
"version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List, Tuple, Optional\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_untagged_ec2_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def check_untagged_instance(res, r):\n", " instance_list = []\n", " for reservation in res:\n", " for instance in reservation['Instances']:\n", " instances_dict = {}\n", " tags = instance.get('Tags', None)\n", " if tags is None:\n", " instances_dict['region']= r\n", " instances_dict['instanceID']= instance['InstanceId']\n", " instance_list.append(instances_dict)\n", " return instance_list\n", "\n", "\n", "@beartype\n", "def aws_filter_untagged_ec2_instances(handle, region: str= None) -> Tuple:\n", " \"\"\"aws_filter_untagged_ec2_instances Returns an array of instances which has no tags.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: str\n", " :param region: Region to filter instances.\n", "\n", " :rtype: Tuple of status, and list of untagged EC2 Instances\n", " \"\"\"\n", " result = []\n", " all_instances = []\n", " all_regions = [region]\n", " if region is None or len(region)==0:\n", " all_regions = aws_list_all_regions(handle=handle)\n", " for r in all_regions:\n", " try:\n", " ec2Client = handle.client('ec2', region_name=r)\n", " res = aws_get_paginator(ec2Client, \"describe_instances\", \"Reservations\")\n", " untagged_instances = check_untagged_instance(res, r)\n", " if len(untagged_instances)!=0:\n", " all_instances.append(untagged_instances)\n", " except Exception as e:\n", " pass\n", " 
try:\n", " result = all_instances[0]\n", " except Exception as e:\n", " pass\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not instance_ids\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"untagged_instances\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_untagged_ec2_instances, lego_printer=aws_filter_untagged_ec2_instances_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "4bc1ab78-471e-4f0a-9933-d84abb36dada", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "untagged_instances
\n", "" ] }, { "cell_type": "code", "execution_count": 5, "id": "087ae782-c90b-46ba-8ed0-76bf9992f51d", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-14T17:26:37.448Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of untagged instances", "orderProperties": [], "tags": [], "title": "Create List of untagged instances" }, "outputs": [], "source": [ "all_untagged_instances = []\n", "try:\n", " if untagged_instances[0] == False:\n", " if len(untagged_instances[1])!=0:\n", " all_untagged_instances=untagged_instances[1]\n", "except Exception:\n", " for ids in instance_ids:\n", " data_dict = {}\n", " data_dict[\"region\"] = region\n", " data_dict[\"instanceID\"] = ids\n", " all_untagged_instances.append(data_dict)\n", "print(all_untagged_instances)\n" ] }, { "cell_type": "markdown", "id": "2e75ed26-5dfd-4a64-a6af-1aa336ac9455", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 3", "orderProperties": [], "tags": [], "title": "Step 3" }, "source": [ "This action captures the following output:
\n", "all_untagged_instances
Using unSkript's Stop EC2 instances action we will stop all untagged EC2 instances found in Step 1.\n", "\n", "
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "1432974e-5c85-48f7-9b17-c3ef3be94152", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2" ], "actionDescription": "Stop an AWS Instance", "actionEntryFunction": "aws_stop_instances", "actionIsCheck": false, "actionIsRemediation": true, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": [ "aws", "instances" ], "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Stop AWS Instances", "actionType": "LEGO_TYPE_AWS", "actionVerbs": [ "stop" ], "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "ef6e03e0bb46f1d9eb58405e5eed4b962c4ae9eeaaf64877c1c4e820c2854c6e", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Stop an AWS Instance", "id": 3, "index": 3, "inputData": [ { "instance_id": { "constant": false, "value": "\"iter.get(\\\\\"instanceID\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "instance_id": { "description": "ID of the instance to be stopped.", "title": "Instance Id", "type": "string" }, "region": { "description": "AWS Region of the instance.", "title": "Region", "type": "string" } }, "required": [ "instance_id", "region" ], "title": "aws_stop_instances", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "instance_id": "instanceID", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_untagged_instances" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Stop AWS Instances", "orderProperties": [ "instance_id", "region" ], "printOutput": true, "startcondition": 
"len(all_untagged_instances)!=0", "tags": [ "aws_stop_instances" ], "uuid": "ef6e03e0bb46f1d9eb58405e5eed4b962c4ae9eeaaf64877c1c4e820c2854c6e", "version": "1.0.0" }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "import pprint\n", "from typing import Dict\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_stop_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_stop_instances(handle, instance_id: str, region: str) -> Dict:\n", " \"\"\"aws_stop_instances Stops instances.\n", "\n", " :type instance_id: string\n", " :param instance_id: String containing the name of AWS EC2 instance\n", "\n", " :type region: string\n", " :param region: AWS region for instance\n", "\n", " :rtype: Dict with the stopped instances state info.\n", " \"\"\"\n", "\n", " ec2Client = handle.client('ec2', region_name=region)\n", " output = {}\n", " res = ec2Client.stop_instances(InstanceIds=[instance_id])\n", " for instances in res['StoppingInstances']:\n", " output[instances['InstanceId']] = instances['CurrentState']\n", "\n", " return output\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=False)\n", "task.configure(inputParamsJson='''{\n", " \"instance_id\": \"iter.get(\\\\\"instanceID\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_untagged_instances\",\n", " \"iter_parameter\": [\"instance_id\",\"region\"]\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_untagged_instances)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is 
None:\n", " task.execute(aws_stop_instances, lego_printer=aws_stop_instances_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "4df3773a-43ff-43f8-9693-505c04936438", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "region (Optional)
In this Runbook, we were able to find all untagged EC2 instances and stop them using unSkript's AWS actions. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Stop all Untagged AWS EC2 Instances", "parameters": [ "region", "execution_flag" ] }, "kernelspec": { "display_name": "unSkript (Build: 1169)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "instance_ids": { "description": "List of EC2 instance IDs", "title": "instance_ids", "type": "array" }, "region": { "description": "AWS region to check for untagged EC2 instances. Eg: \"us-west-2\". If left empty, all regions will be considered.", "title": "region", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Stop_Untagged_EC2_Instances.json ================================================ { "name": "Stop all Untagged AWS EC2 Instances", "description": "This runbook can be used to Stop all EC2 Instances that are Untagged", "uuid": "a16703da15d9e9e2d8a56b146e730b5e4c1496721ff1dc8606a5021d521ed9e3", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_COST_OPT" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Terminate_EC2_Instances_Without_Valid_Lifetime_Tag.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "280d4d6f-a47c-4fa2-8d55-a4e19899d46c", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Filter AWS EC2 Instances Without Lifetime Tag
2) Terminate AWS Instance
Here we will use unSkript's Filter AWS EC2 Instances Without Lifetime Tag action to get all the EC2 instances which don't have a lifetime tag.
\n", "\n", "\n", "This action takes the following parameters:
\n", "lifetime_tag, region, termination_date_tag (all Optional)
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "5fa6542a-0d95-4200-8fe9-d502c31d59c7", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2" ], "actionDescription": "Filter AWS EC2 Instances Without Termination and Lifetime Tag and Check of they are valid", "actionEntryFunction": "aws_filter_instances_without_termination_and_lifetime_tag", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [ "29ce1935204c64d816fd1f01f4fe41e8d8bd47725b899535c6acee703a7bcf0d" ], "actionNextHopParameterMapping": {}, "actionNouns": [ "aws", "instances", "without", "termination", "lifetime", "tag" ], "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Filter AWS EC2 Instances Without Termination and Lifetime Tag", "actionType": "LEGO_TYPE_AWS", "actionVerbs": [ "filter" ], "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "6cc8a1355937c21df3ace495375225012fa8915f4125ad143367e0feb34486c5", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Filter AWS EC2 Instances Without Termination and Lifetime Tag and Check of they are valid", "id": 1, "index": 1, "inputData": [ { "lifetime_tag_name": { "constant": false, "value": "lifetime_tag" }, "region": { "constant": false, "value": "region" }, "termination_tag_name": { "constant": false, "value": "termination_date_tag" } } ], "inputschema": [ { "properties": { "lifetime_tag_name": { "default": "\"lifetimeTag\"", "description": "Name of the Lifetime Date Tag given to an EC2 instance. 
By default \"lifetimeTag\" is considered ", "title": "Lifetime Tag Name", "type": "string" }, "region": { "default": "", "description": "Name of the AWS Region", "title": "Region", "type": "string" }, "termination_tag_name": { "default": "\"terminationDateTag\"", "description": "Name of the Termination Date Tag given to an EC2 instance. By default \"terminationDateTag\" is considered ", "title": "Termination Date Tag Name", "type": "string" } }, "required": [], "title": "aws_filter_instances_without_termination_and_lifetime_tag", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Filter AWS EC2 Instances Without Termination and Lifetime Tag", "orderProperties": [ "region", "termination_tag_name", "lifetime_tag_name" ], "outputParams": { "output_name": "untagged_ec2_instances", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not instance_ids", "tags": [ "aws_filter_instances_without_termination_and_lifetime_tag" ], "uuid": "6cc8a1355937c21df3ace495375225012fa8915f4125ad143367e0feb34486c5", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List, Tuple, Optional\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pprint\n", "from datetime import datetime, date\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_instances_without_termination_and_lifetime_tag_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def fetch_instances_from_valid_region(res,r, termination_tag_name, lifetime_tag_name):\n", " result=[]\n", " instances_dict={}\n", " for 
reservation in res:\n", " for instance in reservation['Instances']:\n", " try:\n", " tagged_instance = instance['Tags']\n", " tag_keys = [tags['Key'] for tags in tagged_instance]\n", " if termination_tag_name not in tag_keys or lifetime_tag_name not in tag_keys:\n", " result.append(instance['InstanceId'])\n", " elif termination_tag_name not in tag_keys and lifetime_tag_name not in tag_keys:\n", " result.append(instance['InstanceId'])\n", " if termination_tag_name in tag_keys:\n", " for x in instance['Tags']:\n", " if x['Key'] == termination_tag_name:\n", " right_now = date.today()\n", " date_object = datetime.strptime(x['Value'], '%d-%m-%Y').date()\n", " if date_object < right_now:\n", " result.append(instance['InstanceId'])\n", " elif x['Key'] == lifetime_tag_name:\n", " launch_time = instance['LaunchTime']\n", " convert_to_datetime = launch_time.strftime(\"%d-%m-%Y\")\n", " launch_date = datetime.strptime(convert_to_datetime,'%d-%m-%Y').date()\n", " if x['Value'] != 'INDEFINITE':\n", " if launch_date < right_now:\n", " result.append(instance['InstanceId'])\n", " except Exception as e:\n", " if len(instance['InstanceId'])!=0:\n", " result.append(instance['InstanceId'])\n", " if len(result)!=0:\n", " instances_dict['region']= r\n", " instances_dict['instances']= result\n", " return instances_dict\n", "\n", "@beartype\n", "def aws_filter_instances_without_termination_and_lifetime_tag(handle, region: str=None, termination_tag_name:str='terminationDateTag', lifetime_tag_name:str='lifetimeTag') -> Tuple:\n", " \"\"\"aws_filter_ec2_without_lifetime_tag Returns an List of instances which not have lifetime tag.\n", "\n", " Assumed tag key format - terminationDateTag, lifetimeTag\n", " Assumed Date format for both keys is -> dd-mm-yy\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: Optional, Name of AWS Region\n", "\n", " :type termination_tag_name: string\n", " :param 
termination_tag_name: Optional, Name of the Termination Date Tag given to an EC2 instance. By default \"terminationDateTag\" is considered\n", "\n", " :type lifetime_tag_name: string\n", " :param lifetime_tag_name: Optional, Name of the Lifetime Date Tag given to an EC2 instance. By default \"lifetimeTag\" is considered\n", "\n", " :rtype: Tuple of status, instances which dont having terminationDateTag and lifetimeTag, and error\n", " \"\"\"\n", " final_list=[]\n", " all_regions = [region]\n", " if region is None or len(region) == 0:\n", " all_regions = aws_list_all_regions(handle=handle)\n", " for r in all_regions:\n", " try:\n", " ec2Client = handle.client('ec2', region_name=r)\n", " all_reservations = aws_get_paginator(ec2Client, \"describe_instances\", \"Reservations\")\n", " instances_without_tags = fetch_instances_from_valid_region(all_reservations, r, termination_tag_name, lifetime_tag_name)\n", " if len(instances_without_tags)!=0:\n", " final_list.append(instances_without_tags)\n", " except Exception as e:\n", " pass\n", " if len(final_list)!=0:\n", " return (False, final_list)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"termination_tag_name\": \"termination_date_tag\",\n", " \"lifetime_tag_name\": \"lifetime_tag\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not instance_ids\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"untagged_instances\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_instances_without_termination_and_lifetime_tag, lego_printer=aws_filter_instances_without_termination_and_lifetime_tag_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "9cd2a6cc-c0b6-48c7-837f-c623f8cf53d4", "metadata": { "jupyter": { 
"source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "untagged_instances
This action filters out regions that have no untagged EC2 instances and creates a list of the ones that do
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": 5, "id": "ff6ee3fa-b94b-4679-943b-782b32c1a095", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-02T16:25:30.826Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Instances Without Termination and Lifetime Tag ", "orderProperties": [], "tags": [], "title": "Create List of Instances Without Termination and Lifetime Tag " }, "outputs": [], "source": [ "all_untagged_instances = []\n", "try:\n", " if untagged_instances[0] == False:\n", " if len(untagged_instances[1])!=0:\n", " all_untagged_instances=untagged_instances[1]\n", "except Exception:\n", " data_dict[\"region\"] = region\n", " data_dict[\"instances\"] = instance_ids\n", " all_untagged_instances.append(data_dict)\n", "print(all_untagged_instances)" ] }, { "cell_type": "markdown", "id": "c09c5eb2-a9a7-4119-aef4-b07e0fdd6c80", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_untagged_instances
This action terminates EC2 instances which don't have a lifetime tag, as captured in Step 1\ud83d\udc46
\n", "\n", "\n", "This action takes the following parameters:
\n", "instance_ids, region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "7f622b00-2f51-4f44-aeaa-18f67823a4ea", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "8744e8836d7a0aff41120620fa4d703dacff25b0dbb5c9c7b87b83783c6c9d18", "collapsed": true, "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Terminate AWS Instances", "id": 192, "index": 192, "inputData": [ { "instance_ids": { "constant": false, "value": "\"iter.get(\\\\\"instances\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "instance_ids": { "description": "List of instance IDs. For eg. [\"i-foo\", \"i-bar\"]", "items": { "type": "string" }, "title": "Instance IDs", "type": "array" }, "region": { "description": "AWS Region of the instance.", "title": "Region", "type": "string" } }, "required": [ "instance_ids", "region" ], "title": "aws_terminate_instance", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "instance_ids": "instances", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_untagged_instances" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Terminate AWS Instances", "nouns": [], "orderProperties": [ "instance_ids", "region" ], "output": { "type": "" }, "printOutput": true, "startcondition": "if terminate==True", "tags": [ "aws_terminate_instance" ], "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from typing import List, Dict\n", "from pydantic import BaseModel, Field\n", "import pprint\n", "\n", "\n", "from beartype 
import beartype\n", "@beartype\n", "def aws_terminate_instance_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_terminate_instance(handle, instance_ids: List, region: str) -> Dict:\n", " \"\"\"aws_terminate_instance Returns an Dict of info terminated instance.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type instance_ids: List\n", " :param instance_ids: Tag to filter Instances.\n", "\n", " :type region: string\n", " :param region: Used to filter the instance for specific region.\n", "\n", " :rtype: Dict of info terminated instance.\n", " \"\"\"\n", " ec2Client = handle.client('ec2', region_name=region)\n", " res = ec2Client.terminate_instances(InstanceIds=instance_ids)\n", "\n", " return res\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"instance_ids\": \"iter.get(\\\\\"instances\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_untagged_instances\",\n", " \"iter_parameter\": [\"instance_ids\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"if terminate==True\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_terminate_instance, lego_printer=aws_terminate_instance_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "324ba188-b516-4100-aebd-18ec3ce8203c", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action captures the following output:
\n", "None
In this Runbook, we demonstrated the use of unSkript's AWS actions to filter untagged instances and terminate them. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Terminate EC2 Instances Without Valid Lifetime Tag", "parameters": [ "region", "terminate" ] }, "kernelspec": { "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "instance_ids": { "description": "List of EC2 instance IDs", "title": "instance_ids", "type": "array" }, "lifetime_tag_name": { "default": "lifetimeTag", "description": "Tag name used to identify the lifetime of a given EC2 instance.", "title": "lifetime_tag_name", "type": "string" }, "region": { "description": "AWS Region to search for EC2 instances. Eg: \"us-west-2\"", "title": "region", "type": "string" }, "terminate": { "default": false, "description": "Check parameter to terminate instances without the termination and lifetime tag. If changed to True, all instances without the termination and lifetime tag will be terminated. 
By default the value is false", "title": "terminate", "type": "boolean" }, "termination_tag_name": { "default": "terminationDateTag", "description": "Tag name used to identify the termination date of a given EC2 instance.", "title": "termination_tag_name", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Terminate_EC2_Instances_Without_Valid_Lifetime_Tag.json ================================================ { "name": "Terminate EC2 Instances Without Valid Lifetime Tag", "description": "This runbook can be used to list all the EC2 instances which don't have a lifetime tag and then terminate them.", "uuid": "29ce1935204c64d816fd1f01f4fe41e8d8bd47725b899535c6acee703a7bcf0d", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_COST_OPT" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Update_RDS_Instances_from_Old_to_New_Generation.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "e2fffe48-5eb4-4177-95ec-7955cc381ad8", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "Here we will use unSkript AWS Get Older Generation RDS Instances action. This action filters all the rds instances from the given region and returns a list of all the older generation instance-type instances. It will execute if the rds_instance_ids parameter is not passed.
\n", "\n", "Input parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "1924ed03-0486-43e1-a388-2b753939b386", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_RDS" ], "actionDescription": "AWS Get Older Generation RDS Instances action retrieves information about RDS instances using older generation instance types.", "actionEntryFunction": "aws_get_older_generation_rds_instances", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "AWS Get Older Generation RDS Instances", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "08da2db2f8fe2dbce378c314e54341b68ee2e9e99ae271f2acd044ef7e8bdee3", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "AWS Get Older Generation RDS Instances action retrieves information about RDS instances using older generation instance types.", "id": 1, "index": 1, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "AWS Region", "type": "string" } }, "required": [], "title": "aws_get_older_generation_rds_instances", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Older Generation RDS Instances", "orderProperties": [ "region" ], "outputParams": { "output_name": "old_gen_rds_instances", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "show_tool_tip": true, "startcondition": "not rds_instance_ids", "tags": [], 
"uuid": "08da2db2f8fe2dbce378c314e54341b68ee2e9e99ae271f2acd044ef7e8bdee3", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2023 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Tuple\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from unskript.connectors.aws import aws_get_paginator\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_older_generation_rds_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def is_previous_gen_instance(instance_type):\n", " previous_gen_instance_types = ['db.m1', 'db.m2', 'db.t1']\n", " for prev_gen_type in previous_gen_instance_types:\n", " if instance_type.startswith(prev_gen_type):\n", " return True\n", " return False\n", "\n", "\n", "@beartype\n", "def aws_get_older_generation_rds_instances(handle, region: str = \"\") -> Tuple:\n", " \"\"\"aws_get_older_generation_rds_instances Gets all older generation RDS DB instances\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: Optional, Region of the RDS.\n", "\n", " :rtype: Status, List of old RDS Instances\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " ec2Client = handle.client('rds', region_name=reg)\n", " response = aws_get_paginator(ec2Client, \"describe_db_instances\", \"DBInstances\")\n", " for db in response:\n", " instance_type = \".\".join(db['DBInstanceClass'].split(\".\", 2)[:2])\n", " response = is_previous_gen_instance(instance_type)\n", " if response:\n", " db_instance_dict = {}\n", " db_instance_dict[\"region\"] = reg\n", " db_instance_dict[\"instance\"] = 
db['DBInstanceIdentifier']\n", " result.append(db_instance_dict)\n", " except Exception:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not rds_instance_ids\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"old_gen_rds_instances\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_older_generation_rds_instances, lego_printer=aws_get_older_generation_rds_instances_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "a23d2c03-f186-470d-9947-ffba9bb49e63", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "old_gen_rds_instances
In this action, we modify the output from step 1 and return a list of aws cli command for the older generation RDS instances.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "5cbcb4b2-149f-43f7-b723-e2f3766c9980", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-05-12T10:41:25.703Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Modify Step-1 Output", "orderProperties": [], "tags": [], "title": "Modify Step-1 Output" }, "outputs": [], "source": [ "older_rds_instances = []\n", "try:\n", " if old_gen_rds_instances[0] == False:\n", " for instance in old_gen_rds_instances[1]:\n", " instance['instance_type'] = rds_instance_type\n", " command = \"aws rds modify-db-instance --db-instance-identifier \" + instance['instance'] + \" --db-instance-class \" + instance['instance_type'] + \" --region \" + instance['region'] + \" --apply-immediately\"\n", " older_rds_instances.append(command)\n", "except Exception as e:\n", " for i in rds_instance_ids:\n", " command = \"aws rds modify-db-instance --db-instance-identifier \" + i + \" --db-instance-class \" + rds_instance_type + \" --region \" + region + \" --apply-immediately\"\n", " older_rds_instances.append(command)\n", " else:\n", " raise Exception(e)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "d1f1a3bf-e7d4-4243-8a99-6e1b66abef29", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable:
\n", "older_rds_instances
Modify DB Instance Class
\n", "In this action, we pass an aws cli command to modify the RDS instance class.
\n", "\n", "\n", "Input parameters:
\n", "aws_command
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "0886a33e-052f-41bc-980f-6dd500a35a71", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_CLI" ], "actionDescription": "Execute command using AWS CLI", "actionEntryFunction": "aws_execute_cli_command", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_STR", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Run Command via AWS CLI", "actionType": "LEGO_TYPE_AWS", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "1db371aff42291641eb6ba83d7acc3fe28e2468d83be1552e8258dc878c0f70d", "collapsed": true, "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Execute command using AWS CLI", "execution_data": { "last_date_success_run_cell": "2023-05-12T10:20:20.403Z" }, "id": 1, "index": 1, "inputData": [ { "aws_command": { "constant": false, "value": "iter_item" } } ], "inputschema": [ { "properties": { "aws_command": { "description": "AWS Command eg \"aws ec2 describe-instances\"", "title": "AWS Command", "type": "string" } }, "required": [ "aws_command" ], "title": "aws_execute_cli_command", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "aws_command", "iter_list": { "constant": false, "objectItems": false, "value": "older_rds_instances" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "name": "Run Command via AWS CLI: Modify DB Instance Class", "orderProperties": [ "aws_command" ], "outputParams": { "output_name": "modified_output", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, 
"startcondition": "len(older_rds_instances)!=0", "tags": [], "title": "Run Command via AWS CLI: Modify DB Instance Class", "uuid": "1db371aff42291641eb6ba83d7acc3fe28e2468d83be1552e8258dc878c0f70d", "version": "1.0.0" }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2021 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_execute_cli_command_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_execute_cli_command(handle, aws_command: str) -> str:\n", "\n", " result = handle.aws_cli_command(aws_command)\n", " if result is None or result.returncode != 0:\n", " print(\n", " f\"Error while executing command ({aws_command}): {result}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"aws_command\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"older_rds_instances\",\n", " \"iter_parameter\": \"aws_command\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(older_rds_instances)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"modified_output\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_execute_cli_command, lego_printer=aws_execute_cli_command_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "37022260-01cb-4cb7-9ed1-aeb30ac4ad64", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "Output variable:
\n", "modified_output
In this Runbook, we demonstrated the use of unSkript's AWS actions to get RDS instances with old generation and modify them to the new given instance class. To view the full platform capabunscriptedof unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "AWS Update RDS Instances from Old to New Generation", "parameters": null }, "kernelspec": { "display_name": "unSkript (Build: 1169)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "rds_instance_ids": { "description": "RDS Instance Ids.", "title": "rds_instance_ids", "type": "array" }, "rds_instance_type": { "description": "RDS Instance Type e.g. \"db.t3.micro\"", "title": "rds_instance_type", "type": "string" }, "region": { "description": "AWS Region", "title": "region", "type": "string" } }, "required": [ "rds_instance_type" ], "title": "Schema", "type": "object" }, "show_action_drag_hint_done": { "environment_id": "1499f27c-6406-4fbd-bd1b-c6f92800018f", "environment_name": "Staging", "execution_id": "", "inputs_for_searched_lego": "", "notebook_id": "d4159cb3-6c83-4ba5-a2f7-d23c0777076b.ipynb", "parameters": null, "runbook_name": "gcp", "search_string": "", "show_tool_tip": true, "tenant_id": "982dba5f-d9df-48ae-a5bf-ec1fc94d4882", "tenant_url": "https://tenant-staging.alpha.unskript.io", "user_email_id": "support+staging@unskript.com", "workflow_id": "f8ead207-81c0-414a-a15b-76fcdefafe8d" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Update_RDS_Instances_from_Old_to_New_Generation.json ================================================ { "name": "AWS Update RDS Instances from Old to New Generation", "description": "This runbook can be used to find the old generation RDS instances for the given AWS region and modify then to the given instance class.", "uuid": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "icon": "CONNECTOR_TYPE_AWS", "categories": [ 
"CATEGORY_TYPE_CLOUDOPS","CATEGORY_TYPE_SECOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/AWS_Update_Redshift_Database.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "5331e347-9cea-40fe-9828-959657edd35d", "metadata": { "name": "Introduction", "orderProperties": [], "tags": [], "title": "Introduction" }, "source": [ "This Runbook takes data from an S3 bucket, and populates an AWS Redshift table with the data.
\n", "The initial reason for this RunBook was to populate AWS Cost and Usage Reports (CUR) into Redshift The CUR is dumped into a S3 bucket. In order to run queries, it must be copied into a Redshift table.
\n", "We have written a series of blog posts on this:
\n", "https://unskript.com/blog/keeping-your-cloud-costs-in-check-automated-aws-cost-charts-and-alerting/
\n", "https://unskript.com/blog/cloud-costs-charting-daily-ec2-usage-and-cost/
\n", "\n", "
\n", "
Here are the steps you need to complete before you can run this runbook:
\n", "Every month, you'll need to create the new table in RedShift manually. (this is a TODO for anyone interested in contributing!)
\n", "\n", "
![]()
\n", "
![]()
\n", "
\n", "
If this RunBook is run Programatically - no dates will be changed - just a Slack alert sent.
\n", "\n", "
\n", "
Here we will use unSkript AWS Get Resources with the expiration tag - the tag name is an input parameter for the runbook.
" ] }, { "cell_type": "code", "execution_count": 12, "id": "0ec169e9-f3f2-400d-9b58-e4a598769e61", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": true, "action_uuid": "aee6cabb55096d5cf6098faa7e4a94135e8f5b0572b36d4b3252d7745fae595b", "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "customCell": true, "description": "AWS Get Untagged Resources", "execution_data": { "last_date_success_run_cell": "2023-06-07T14:51:19.386Z" }, "id": 187, "index": 187, "inputData": [ { "region": { "constant": false, "value": "Region" }, "tag": { "constant": false, "value": "expiration_tag" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "Region", "type": "string" }, "tag": { "default": "\"Tag_Key\"", "description": "The Tag to search for", "title": "tag", "type": "string" } }, "required": [ "region", "tag" ], "title": "aws_get_resources_with_expiration_tag", "type": "object" } ], "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Resources With Expiration Tag", "nouns": [ "aws", "resources" ], "orderProperties": [ "region", "tag" ], "output": { "type": "" }, "outputParams": { "output_name": "ExpirationResources", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "service_id_enabled": false, "tags": [ "aws_get_untagged_resources" ], "title": "AWS Get Resources With Expiration Tag", "verbs": [ "list" ] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import List, Dict\n", "from pydantic import BaseModel, Field\n", "from unskript.connectors.aws import aws_get_paginator\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def 
aws_get_resources_with_expiration_tag_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(f\"there are {len(output)} resources missing tag {Tag_Key}. We can fix a max of 20.\" )\n", "\n", "\n", "@beartype\n", "def aws_get_resources_with_expiration_tag(handle, region: str, tag:str) -> List:\n", "\n", " ec2Client = handle.client('resourcegroupstaggingapi', region_name=region)\n", " result = []\n", " try:\n", " response = aws_get_paginator(ec2Client, \"get_resources\", \"ResourceTagMappingList\")\n", " for resources in response:\n", " if resources[\"Tags\"]:\n", " #has tags\n", " tags = resources['Tags']\n", " for kv in resources['Tags']:\n", " if kv[\"Key\"] == tag:\n", " #we have found an expiration tag\n", " temp ={'arn': [resources[\"ResourceARN\"]], 'expires':kv[\"Value\"]}\n", " print(temp)\n", " result.append(temp)\n", "\n", " except Exception as error:\n", " result.append({\"error\":error})\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"Region\",\n", " \"tag\": \"expiration_tag\"\n", " }''')\n", "task.configure(outputName=\"ExpirationResources\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_resources_with_expiration_tag, lego_printer=aws_get_resources_with_expiration_tag_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "8be56b96-04b8-4518-afaa-157b4d530321", "metadata": { "jupyter": { "source_hidden": false }, "name": "Filter the Resources", "orderProperties": [], "tags": [], "title": "Filter the Resources" }, "source": [ "Now, we filter for ony resrouces that have expired, or are bout to expire, and display in list with a date picker.
\n", "\n", "
Updating the date will allow us to change the value to the expiration tag.
\n", "" ] }, { "cell_type": "code", "execution_count": 4, "id": "2f6628f1-6285-49fb-9423-2eeb0575043d", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-06-07T14:37:42.200Z" }, "jupyter": { "source_hidden": true }, "name": "find resources about to expire", "orderProperties": [], "tags": [], "title": "find resources about to expire", "credentialsJson": {} }, "outputs": [], "source": [ "from datetime import datetime, timedelta\n", "\n", "#print(ExpirationResources)\n", "expiringList = []\n", "# Get the current date\n", "current_date = datetime.now()\n", "\n", "# Calculate the date days_to_expire days from now\n", "future_date = current_date + timedelta(days=days_to_expire)\n", "\n", "for resource in ExpirationResources:\n", " expires = datetime.strptime(resource['expires'], \"%m/%d/%Y\")\n", " if expires < future_date:\n", " expiringList.append(resource)\n", "\n", "print(expiringList)" ] }, { "cell_type": "code", "execution_count": 5, "id": "de6350ed-9d0c-45fe-8917-5e95d370eed7", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-06-07T14:37:47.898Z" }, "jupyter": { "source_hidden": true }, "name": "select expirations to renew", "orderProperties": [], "tags": [], "title": "select expirations to renew", "credentialsJson": {} }, "outputs": [], "source": [ "from datetime import datetime\n", "import ipywidgets as widgets\n", "from IPython.display import display\n", "\n", "# Sample list of dictionaries\n", "\n", "\n", "def update_expiration_date(expiration_date, arn):\n", " for item in expiringList:\n", " if item[\"arn\"][0] == arn:\n", " item[\"expires\"] = expiration_date.strftime(\"%m/%d/%Y\")\n", " print(expiringList)\n", "\n", "def on_date_change(change):\n", " arn = change.owner.description\n", " expiration_date = change.new\n", " update_expiration_date(expiration_date, arn)\n", "\n", "# Create a date picker for each ARN\n", "for item in expiringList:\n", " expiration_date = 
datetime.strptime(item[\"expires\"], \"%m/%d/%Y\").date()\n", " date_picker = widgets.DatePicker(description=item[\"arn\"][0], \n", " style=dict(description_width='initial'),\n", " layout=dict(width='80%'),\n", " value=expiration_date)\n", " date_picker.observe(on_date_change, names='value')\n", " display(date_picker)\n" ] }, { "cell_type": "code", "execution_count": 5, "id": "14eaf63e-750e-40d1-aa57-2fde82fefba8", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-06-06T18:40:55.402Z" }, "orderProperties": [], "tags": [], "credentialsJson": {} }, "outputs": [], "source": [ "print(expiringList)" ] }, { "cell_type": "markdown", "id": "ce65fdd0-ee64-42d0-90a6-0fe1c0f54608", "metadata": { "jupyter": { "source_hidden": false }, "name": "AWS Attach Tags to Resources", "orderProperties": [], "tags": [], "title": "AWS Attach Tags to Resources" }, "source": [ "
Here we will use unSkript AWS Attach Tags to Resources Lego.
\n", "\n", "
The updated dates from the date picker will be used to replace the current value in the expiration tag.
" ] }, { "cell_type": "code", "execution_count": 32, "id": "b0bf6aee-2b72-4348-8c38-fe3783619da6", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_AWS" ], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "878cb7819ecb4687ecfa8c6143365d10fe6b127adeb4a27fd71d06a3a2243d22", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "AWS Attach Tags to Resources", "execution_data": { "last_date_success_run_cell": "2023-06-05T21:11:47.279Z" }, "id": 260, "index": 260, "inputData": [ { "region": { "constant": false, "value": "Region" }, "resource_arn": { "constant": false, "value": "\"iter.get(\\\\\"arn\\\\\")\"" }, "tag_key": { "constant": false, "value": "expiration_tag" }, "tag_value": { "constant": false, "value": "\"iter.get(\\\\\"expires\\\\\")\"" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "resource_arn": { "description": "Resource ARNs.", "items": {}, "title": "Resource ARN", "type": "array" }, "tag_key": { "description": "Resource Tag Key.", "title": "Tag Key", "type": "string" }, "tag_value": { "description": "Resource Tag Value.", "title": "Tag Value", "type": "string" } }, "required": [ "region", "resource_arn", "tag_key", "tag_value" ], "title": "aws_attach_tags_to_resources", "type": "object" } ], "isUnskript": false, "iterData": [ { "iter_enabled": true, "iter_item": { "resource_arn": "arn", "tag_value": "expires" }, "iter_list": { "constant": false, "objectItems": true, "value": "[x for x in expiringList]" } } ], "jupyter": { "source_hidden": true }, "legotype": 
"LEGO_TYPE_AWS", "name": "AWS Attach Tags to Resources", "nouns": [], "orderProperties": [ "resource_arn", "tag_key", "tag_value", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "test", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "tags": [ "aws_attach_tags_to_resources" ], "verbs": [] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "import pprint\n", "from typing import Dict\n", "from pydantic import BaseModel, Field\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_attach_tags_to_resources_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def aws_attach_tags_to_resources(\n", " handle,\n", " resource_arn: list,\n", " tag_key: str,\n", " tag_value: str,\n", " region: str\n", " ) -> Dict:\n", " \"\"\"aws_attach_tags_to_resources Returns an Dict of resource info.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type resource_arn: list\n", " :param resource_arn: Resource ARNs.\n", "\n", " :type tag_key: str\n", " :param tag_key: Resource Tag Key.\n", "\n", " :type tag_value: str\n", " :param tag_value: Resource Tag value.\n", "\n", " :type region: str\n", " :param region: Region to filter resources.\n", "\n", " :rtype: Dict of resource info.\n", " \"\"\"\n", " ec2Client = handle.client('resourcegroupstaggingapi', region_name=region)\n", " result = {}\n", " try:\n", " response = ec2Client.tag_resources(\n", " ResourceARNList=resource_arn,\n", " Tags={tag_key: tag_value}\n", " )\n", " result = response\n", "\n", " except Exception as error:\n", " result[\"error\"] = error\n", "\n", " return result\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=False)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"Region\",\n", " \"resource_arn\": 
\"iter.get(\\\\\"arn\\\\\")\",\n", " \"tag_key\": \"expiration_tag\",\n", " \"tag_value\": \"iter.get(\\\\\"expires\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"[x for x in expiringList]\",\n", " \"iter_parameter\": [\"resource_arn\",\"tag_value\"]\n", " }''')\n", "task.configure(outputName=\"test\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_attach_tags_to_resources, lego_printer=aws_attach_tags_to_resources_printer, hdl=hdl, args=args)" ] }, { "cell_type": "code", "execution_count": 9, "id": "67e94cc1-d88f-4eaf-a419-62903a7e8c7a", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_SLACK" ], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "6a87f83ab0ecfeecb9c98d084e2b1066c26fa64be5b4928d5573a5d60299802d", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Post Slack Message", "execution_data": { "last_date_success_run_cell": "2023-06-07T14:42:45.225Z" }, "id": 106, "index": 106, "inputData": [ { "channel": { "constant": false, "value": "\"devrel_doug_test1\"" }, "message": { "constant": false, "value": "f\"There are {len(expiringList)} AWS resources set to expire in the next {days_to_expire} days! 
Use the AWS Resources About To Expire RunBook to manually update these dates to avoid any deletion of important resources\"" } } ], "inputschema": [ { "properties": { "channel": { "description": "Name of slack channel.", "title": "Channel", "type": "string" }, "message": { "description": "Message for slack channel.", "title": "Message", "type": "string" } }, "required": [ "channel", "message" ], "title": "slack_post_message", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SLACK", "name": "Post Slack Message", "nouns": [], "orderProperties": [ "channel", "message" ], "output": { "type": "" }, "printOutput": true, "tags": [ "slack_post_message" ], "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "from pydantic import BaseModel, Field\n", "from beartype import beartype\n", "from slack_sdk import WebClient\n", "from slack_sdk.errors import SlackApiError\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "from beartype import beartype\n", "@beartype\n", "def slack_post_message_printer(output):\n", " if output is not None:\n", " pprint.pprint(output)\n", " else:\n", " return\n", "\n", "\n", "@beartype\n", "@beartype\n", "def slack_post_message(\n", " handle: WebClient,\n", " channel: str,\n", " message: str) -> str:\n", "\n", " try:\n", " handle.chat_postMessage(\n", " channel=channel,\n", " text=message)\n", " return f\"Successfully Sent Message on Channel: #{channel}\"\n", " except SlackApiError as e:\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.response['error']}\")\n", " if e.response['error'] == 'channel_not_found':\n", " raise Exception('Channel Not Found') from e\n", " if e.response['error'] == 'duplicate_channel_not_found':\n", " raise Exception('Channel associated with the message_id not valid') from e\n", " if e.response['error'] == 'not_in_channel':\n", " 
raise Exception('Cannot post message to channel user is not in') from e\n", " if e.response['error'] == 'is_archived':\n", " raise Exception('Channel has been archived') from e\n", " if e.response['error'] == 'msg_too_long':\n", " raise Exception('Message text is too long') from e\n", " if e.response['error'] == 'no_text':\n", " raise Exception('Message text was not provided') from e\n", " if e.response['error'] == 'restricted_action':\n", " raise Exception('Workspace preference prevents user from posting') from e\n", " if e.response['error'] == 'restricted_action_read_only_channel':\n", " raise Exception('Cannot Post message, read-only channel') from e\n", " if e.response['error'] == 'team_access_not_granted':\n", " raise Exception('The token used is not granted access to the workspace') from e\n", " if e.response['error'] == 'not_authed':\n", " raise Exception('No authentication token provided') from e\n", " if e.response['error'] == 'invalid_auth':\n", " raise Exception('Some aspect of Authentication cannot be validated. Request denied') from e\n", " if e.response['error'] == 'access_denied':\n", " raise Exception('Access to a resource specified in the request denied') from e\n", " if e.response['error'] == 'account_inactive':\n", " raise Exception('Authentication token is for a deleted user') from e\n", " if e.response['error'] == 'token_revoked':\n", " raise Exception('Authentication token for a deleted user has been revoked') from e\n", " if e.response['error'] == 'no_permission':\n", " raise Exception('The workspace token used does not have necessary permission to send message') from e\n", " if e.response['error'] == 'ratelimited':\n", " raise Exception('The request has been ratelimited. 
Retry sending message later') from e\n", " if e.response['error'] == 'service_unavailable':\n", " raise Exception('The service is temporarily unavailable') from e\n", " if e.response['error'] == 'fatal_error':\n", " raise Exception('The server encountered catostrophic error while sending message') from e\n", " if e.response['error'] == 'internal_error':\n", " raise Exception('The server could not complete operation, likely due to transietn issue') from e\n", " if e.response['error'] == 'request_timeout':\n", " raise Exception('Sending message error via POST: either message was missing or truncated') from e\n", " else:\n", " raise Exception(f'Failed Sending Message to slack channel {channel} Error: {e.response[\"error\"]}') from e\n", "\n", " except Exception as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {str(e)}\")\n", " return f\"Unable to send message on {channel}\"\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"channel\": \"\\\\\"devrel_doug_test1\\\\\"\",\n", " \"message\": \"f\\\\\"There are {len(expiringList)} AWS resources set to expire in the next {days_to_expire} days! Use the AWS Resources About To Expire RunBook to manually update these dates to avoid any deletion of important resources\\\\\"\"\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(slack_post_message, lego_printer=slack_post_message_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "a8280ac4-d504-44d2-b5ea-d97f7ca672c8", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "In this Runbook, we demonstrated the use of unSkript's AWS legos to attach tags. 
This Runbook gets the list of all untagged resources of a given region, discovers tag keys of the given region and attaches mandatory tags to all the untagged resource. To view the full platform capabilities of unSkript please visit https://unskript.com" ] } ], "metadata": { "execution_data": { "runbook_name": "AWS Add Tags Across Selected AWS Resources", "parameters": [ "expiration_tag", "days_to_expire", "Region" ] }, "kernelspec": { "display_name": "unSkript (Build: 1185)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "outputParameterSchema": null, "parameterSchema": { "definitions": null, "properties": { "Region": { "default": "us-west-2", "description": "Resources Region", "title": "Region", "type": "string" }, "days_to_expire": { "default": 15, "description": "Find resources set to expire in the next days_to_expire days.", "title": "days_to_expire", "type": "number" }, "expiration_tag": { "default": "expiration", "description": "The name of the tag that is used to identify the Resource expiration", "title": "expiration_tag", "type": "string" } }, "required": [], "title": "Schema", "type": "object" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_Update_Resources_About_To_Expire.json ================================================ { "name": "AWS Add Tags Across Selected AWS Resources", "description": "This finds resources missing a tag, and allows you to choose which resources should add a specific tag/value pair.", "uuid": "a79201f821993867e23dd9603ed7ef5523324353d717c566f902f7ac6e471f5c", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_CLOUDOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } 
================================================ FILE: AWS/AWS_encrypt_unencrypted_S3_buckets.ipynb ================================================ { "cells": [ { "attachments": {}, "cell_type": "markdown", "id": "cbabc8b5-57b4-45b8-890c-370bb1ed6f02", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "In this action, we list all the available regions from AWS if the user does not provide a region as a parameter.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 3, "id": "53f85394-1036-40b4-922f-c8d72c50acd6", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "708ea4af5f8fe7096a15b3a52c4a657606bab9e177386fad7a847341ed607d64", "condition_enabled": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "List all available AWS Regions", "execution_data": { "last_date_success_run_cell": "2023-02-03T04:42:43.566Z" }, "id": 215, "index": 215, "inputschema": [ { "properties": {}, "title": "aws_list_all_regions", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS List All Regions", "nouns": [ "regions", "aws" ], "orderProperties": [], "output": { "type": "" }, "outputParams": { "output_name": "region", "output_name_enabled": true }, "printOutput": true, "startcondition": "not region", "tags": [ "aws_list_all_regions" ], "verbs": [ "list" ] }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2021 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "from typing import Dict, List\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_list_all_regions_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_list_all_regions(handle) -> List:\n", " \"\"\"aws_list_all_regions lists all the AWS regions\n", "\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :rtype: Result List of result\n", " \"\"\"\n", "\n", " result = handle.aws_cli_command(\"aws ec2 describe-regions --all-regions --query 'Regions[].{Name:RegionName}' --output text\")\n", " if result is None or result.returncode != 
0:\n", " print(\"Error while executing command : {}\".format(result))\n", " return str()\n", " result_op = list(result.stdout.split(\"\\n\"))\n", " list_region = [x for x in result_op if x != '']\n", " return list_region\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not region\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"region\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_list_all_regions, lego_printer=aws_list_all_regions_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "38f0ef87-76cb-4505-b012-5681855c9920", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1", "orderProperties": [], "tags": [], "title": "Step-1" }, "source": [ "Output variable:
\n", "region
Here we will use unSkript Filter Unencrypted S3 Buckets action. This action filters all the S3 buckets from the given region and returns a list of those S3 buckets without encryption. It will execute if the bucket_name parameter is not given.
\n", "\n", "\n", "Input parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": 4, "id": "14360884-0e4a-4b33-8e08-f0f5c3cf7ad5", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "2fa5c0d3a9ed5951fbf2a1390610941af8e145521c244fa07b597d6ca6665a43", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Filter AWS Unencrypted S3 Buckets", "execution_data": { "last_date_success_run_cell": "2023-02-03T04:44:13.354Z" }, "id": 235, "index": 235, "inputData": [ { "region": { "constant": false, "value": "iter_item" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region.", "title": "Region", "type": "string" } }, "title": "aws_filter_unencrypted_s3_buckets", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "region", "iter_list": { "constant": false, "objectItems": false, "value": "region" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Filter AWS Unencrypted S3 Buckets", "nouns": [], "orderProperties": [ "region" ], "output": { "type": "" }, "outputParams": { "output_name": "unencrypted_buckets", "output_name_enabled": true }, "printOutput": true, "startcondition": "not bucket_name", "tags": [ "aws_filter_unencrypted_s3_buckets" ], "title": "Filter AWS Unencrypted S3 Buckets", "verbs": [] }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Optional\n", "from unskript.legos.utils import CheckOutput, CheckOutputStatus\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "from botocore.exceptions import ClientError\n", "import 
pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_unencrypted_s3_buckets_printer(output):\n", " if output is None:\n", " return\n", "\n", " if isinstance(output, CheckOutput):\n", " print(output.json())\n", " else:\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_filter_unencrypted_s3_buckets(handle, region: str = \"\") -> CheckOutput:\n", " \"\"\"aws_filter_unencrypted_s3_buckets List of unencrypted S3 bucket name .\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type region: string\n", " :param region: Filter S3 buckets.\n", "\n", " :rtype: CheckOutput with status result and list of unencrypted S3 bucket name.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", " for reg in all_regions:\n", " try:\n", " s3Client = handle.client('s3', region_name=reg)\n", " response = s3Client.list_buckets()\n", " # List unencrypted S3 buckets\n", " for bucket in response['Buckets']:\n", " try:\n", " response = s3Client.get_bucket_encryption(Bucket=bucket['Name'])\n", " encRules = response['ServerSideEncryptionConfiguration']['Rules']\n", " except ClientError as e:\n", " bucket_dict = {}\n", " bucket_dict[\"region\"] = reg\n", " bucket_dict[\"bucket\"] = bucket['Name']\n", " result.append(bucket_dict)\n", " except Exception as error:\n", " pass\n", "\n", " if len(result) != 0:\n", " return CheckOutput(status=CheckOutputStatus.FAILED,\n", " objects=result,\n", " error=str(\"\"))\n", " else:\n", " return CheckOutput(status=CheckOutputStatus.SUCCESS,\n", " objects=result,\n", " error=str(\"\"))\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(outputName=\"unencrypted_buckets\")\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", 
" \"iter_list_is_const\": false,\n", " \"iter_list\": \"region\",\n", " \"iter_parameter\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not bucket_name\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_unencrypted_s3_buckets, lego_printer=aws_filter_unencrypted_s3_buckets_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "f2ed3b50-50f4-4983-b409-690aecf27b1c", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "unencrypted_buckets
In this action, we modify the output from step 1 and return a list of dictionary items for the Unencrypted S3 Buckets
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 4, "id": "47117b25-2533-4021-b4f3-329b7fee165e", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-10T10:31:04.455Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Modify Step-1 Output", "orderProperties": [], "tags": [], "title": "Modify Step-1 Output", "trusted": true }, "outputs": [], "source": [ "bucket_list = []\n", "\n", "try:\n", " for k, v in unencrypted_buckets.items():\n", " if v.status == CheckOutputStatus.FAILED:\n", " for bucket in v.objects:\n", " bucket_list.append(bucket)\n", "except Exception as e:\n", " for i in bucket_name:\n", " data_dict = {}\n", " data_dict[\"region\"] = region[0]\n", " data_dict[\"bucket\"] = i\n", " bucket_list.append(data_dict)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "0a1ba685-0340-4af8-9bc7-32e9beff2837", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable: ebs_list
\n", "
Here we will use the unSkript Apply AWS Default Encryption for the S3 Buckets action. In this action, we will apply the default encryption configuration to the unencrypted S3 buckets by passing the list of unencrypted S3 buckets from step 1.
\n", "\n", "\n", "Input parameters:
\n", "name,region
\n", "" ] }, { "cell_type": "code", "execution_count": 2, "id": "80b2e9a4-023a-4235-99ba-dce06988eb6e", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "eb57da3b21aec38d005bf0355a48ba53937c7ac62f98e9c968c9501412d72008", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Apply a New AWS Policy for S3 Bucket", "execution_data": { "last_date_success_run_cell": "2022-08-26T20:00:28.237Z" }, "id": 135, "index": 135, "inputData": [ { "name": { "constant": false, "value": "\"iter.get(\\\\\"bucket\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "name": { "default": "", "description": "Name of the bucket.", "title": "Bucket name", "type": "string" }, "region": { "default": "", "description": "AWS region of the bucket.", "title": "Region", "type": "string" } }, "required": [ "name", "policy", "region" ], "title": "aws_put_bucket_policy", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "name": "bucket", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "bucket_list" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Apply AWS Default Encryption for S3 Bucket", "nouns": [ "aws", "policy", "bucket" ], "orderProperties": [ "name", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "apply_output", "output_name_enabled": true }, "printOutput": true, "startcondition": "len(bucket_list) > 0", "tags": [ "aws_put_bucket_policy" ], "title": "Apply AWS Default Encryption for S3 Bucket", "verbs": [ "apply" ] }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", 
"##\n", "from pydantic import BaseModel, Field\n", "from typing import Dict\n", "import json\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_put_bucket_encryption_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_put_bucket_encryption(handle, name: str, region: str) -> Dict:\n", " \"\"\"aws_put_bucket_encryption Puts default encryption configuration for bucket.\n", "\n", " :type name: string\n", " :param name: NAme of the S3 bucket.\n", "\n", " :type region: string\n", " :param region: location of the bucket\n", "\n", " :rtype: Dict with the response info.\n", " \"\"\"\n", " s3Client = handle.client('s3',\n", " region_name=region)\n", "\n", " # Setup default encryption configuration \n", " response = s3Client.put_bucket_encryption(\n", " Bucket=name,\n", " ServerSideEncryptionConfiguration={\n", " \"Rules\": [\n", " {\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"AES256\"}}\n", " ]},\n", " )\n", " return response\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\",\n", " \"name\": \"iter.get(\\\\\"bucket\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"bucket_list\",\n", " \"iter_parameter\": [\"name\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(bucket_list) > 0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"apply_output\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_put_bucket_encryption, lego_printer=aws_put_bucket_encryption_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", 
"id": "dea3003f-03e9-4dff-86fb-b4073ee4ef79", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "Output variable:
\n", "apply_output
In this Runbook, we demonstrated the use of unSkript's AWS legos to filter all unencrypted S3 buckets and apply default encryption configuration to the buckets. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Encrypt unencrypted S3 buckets", "parameters": [ "bucket_name", "region" ] }, "kernelspec": { "display_name": "base", "language": "python", "name": "python3" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3", "version": "3.9.12" }, "parameterSchema": { "properties": { "bucket_name": { "description": "list of S3 bucket Name", "title": "bucket_name", "type": "array" }, "region": { "description": "AWS Region e.g.[\"us-west-2\"]", "title": "region", "type": "array" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": null, "vscode": { "interpreter": { "hash": "5e269198fab4eb2ea6fe7c886c38b87b334869f0501ab924e1d16d60aeba5d23" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/AWS_encrypt_unencrypted_S3_buckets.json ================================================ { "name": "Encrypt unencrypted S3 buckets", "description": "This runbook can be used to filter all the S3 buckets which are unencrypted and apply encryption on unencrypted S3 buckets.", "uuid": "50d9c6abd7dce3ff9183d4135353e82859bc5a9639455b35bd229331be6048df", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS","CATEGORY_TYPE_SECOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/Add_new_IAM_user.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "8a97b231-94d6-4e10-a24c-6eac9a4572e4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Add New IAM User", "orderProperties": [], "tags": [], "title": "Add New IAM User" }, "source": [ "Here we will use unSkript Create New IAM User action. This action creates an IAM user in AWS and assigns the given tag to the user.
\n", "\n", "\n", "Input parameters:
\n", "user_name,tag_key,tag_value
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "9fe78a10-d76f-4961-8e5c-bf381c5b3cc9", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "3f71dd060d5955f5dc9104dbaf418bf957b2222c510cb3afd09ded8e41e433d9", "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Create New IAM User", "id": 222, "index": 222, "inputData": [ { "tag_key": { "constant": false, "value": "tag_key" }, "tag_value": { "constant": false, "value": "tag_value" }, "user_name": { "constant": false, "value": "username" } } ], "inputschema": [ { "properties": { "tag_key": { "description": "Tag Key to new IAM User.", "title": "Tag Key", "type": "string" }, "tag_value": { "description": "Tag Value to new IAM User.", "title": "Tag Value", "type": "string" }, "user_name": { "description": "IAM User Name.", "title": "User Name", "type": "string" } }, "required": [ "user_name", "tag_key", "tag_value" ], "title": "aws_create_iam_user", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Create New IAM User", "nouns": [ "aws", "IAM", "user" ], "orderProperties": [ "user_name", "tag_key", "tag_value" ], "output": { "type": "" }, "outputParams": { "output_name": "user_details", "output_name_enabled": true }, "printOutput": true, "tags": [ "aws_create_iam_user" ], "title": "Create New IAM User", "verbs": [ "create" ] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import List, Dict\n", "from pydantic import BaseModel, Field\n", "from botocore.exceptions import ClientError\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_create_iam_user_printer(output):\n", " if output is None:\n", " 
return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_create_iam_user(handle, user_name: str, tag_key: str, tag_value: str) -> Dict:\n", " \"\"\"aws_create_iam_user Creates new IAM User.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method\n", "\n", " :type user_name: string\n", " :param user_name: Name of new IAM User.\n", "\n", " :type tag_key: string\n", " :param tag_key: Tag Key assign to new User.\n", "\n", " :type tag_value: string\n", " :param tag_value: Tag Value assign to new User.\n", "\n", " :rtype: Dict with the stopped instances state info.\n", " \"\"\"\n", "\n", " ec2Client = handle.client(\"iam\")\n", " result = {}\n", " try:\n", " response = ec2Client.create_user(\n", " UserName=user_name,\n", " Tags=[\n", " {\n", " 'Key': tag_key,\n", " 'Value': tag_value\n", " }])\n", " result = response\n", " except ClientError as error:\n", " if error.response['Error']['Code'] == 'EntityAlreadyExists':\n", " result = error.response\n", " else:\n", " result = error.response\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"tag_key\": \"tag_key\",\n", " \"tag_value\": \"tag_value\",\n", " \"user_name\": \"username\"\n", " }''')\n", "task.configure(outputName=\"user_details\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_create_iam_user, lego_printer=aws_create_iam_user_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "c174d638-f107-450f-ab2d-d28cf097a722", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable:
\n", "user_details
This action only executes when step 1 successfully creates a user. In this action, we will pass the newly created username and temporary password, which will create a user profile for the user in AWS.
\n", "\n", "\n", "Input parameters:
\n", "user_name,password
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "35887cbc-bdb1-4f3b-8f59-a2bb78e9b605", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "7b52e5fdfddd113a1c489d95d5fd8c9a98043c6ea721588531db6a5261434975", "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Create Login profile for IAM User", "id": 166, "index": 166, "inputData": [ { "password": { "constant": false, "value": "password" }, "user_name": { "constant": false, "value": "username" } } ], "inputschema": [ { "properties": { "password": { "description": "Password for IAM User.", "title": "Password", "type": "string" }, "user_name": { "description": "IAM User Name.", "title": "User Name", "type": "string" } }, "required": [ "user_name", "password" ], "title": "aws_create_user_login_profile", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Create Login profile for IAM User", "nouns": [ "aws", "IAM", "login" ], "orderProperties": [ "user_name", "password" ], "output": { "type": "" }, "outputParams": { "output_name": "profile_details", "output_name_enabled": true }, "printOutput": true, "startcondition": "'User' in UserInfo", "tags": [ "aws_create_user_login_profile" ], "verbs": [ "create" ] }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Dict\n", "from botocore.exceptions import ClientError\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_create_user_login_profile_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def 
aws_create_user_login_profile(handle, user_name: str, password: str) -> Dict:\n", " \"\"\"aws_create_user_login_profile Create login profile for IAM User.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method.\n", "\n", " :type user_name: string\n", " :param user_name: Name of new IAM User.\n", "\n", " :type password: string\n", "\n", " :rtype: Dict with the Profile Creation status info.\n", " \"\"\"\n", "\n", " ec2Client = handle.client(\"iam\")\n", " result = {}\n", " try:\n", " response = ec2Client.create_login_profile(\n", " UserName=user_name,\n", " PasswordResetRequired=True)\n", "\n", " result = response\n", " except ClientError as error:\n", " if error.response['Error']['Code'] == 'EntityAlreadyExists':\n", " result = error.response\n", " else:\n", " result = error.response\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"password\": \"password\",\n", " \"user_name\": \"username\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"'User' in UserInfo\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"profile_details\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_create_user_login_profile, lego_printer=aws_create_user_login_profile_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "29511895-d1cc-4a01-9990-8928642b5006", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-3", "orderProperties": [], "tags": [], "title": "Step-3" }, "source": [ "Output variable:
\n", "profile_details
Here we will use the unSkript Get Caller Identity action. This action does not take any inputs; it shows the caller's identity for the current user.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 5, "id": "dd1e1542-ddd7-4b86-86a2-17e999458fbd", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "afacde59-a401-4a8b-901d-46c4b3970b78", "createTime": "2022-07-27T16:51:48Z", "credentialsJson": {}, "currentVersion": "v0.0.0", "description": "Test", "execution_data": { "last_date_success_run_cell": "2022-09-02T16:44:27.574Z" }, "id": 100001, "index": 100001, "inputschema": [ { "properties": {}, "required": [ "instance_ids", "region" ], "title": "aws_restart_ec2_instances_test", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Get Caller Identity ", "nouns": [], "orderProperties": [], "output": { "type": "" }, "outputParams": { "output_name": "caller_details", "output_name_enabled": true }, "printOutput": true, "tags": [], "title": "Get Caller Identity ", "verbs": [] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import List, Dict\n", "from pydantic import BaseModel, Field\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_caller_identity_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_caller_identity(handle) -> Dict:\n", " ec2Client = handle.client('sts')\n", " response = ec2Client.get_caller_identity()\n", "\n", " return response\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(outputName=\"caller_details\")\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_caller_identity, lego_printer=aws_get_caller_identity_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": 
"d1f05583-fa8c-4f8c-a357-3f6154df4620", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-4", "orderProperties": [], "tags": [], "title": "Step-4" }, "source": [ "Output variable:
\n", "caller_details
Here we will use the unSkript Post Slack Message action. This action sends a message on the Slack channel with the newly created username.
\n", "\n", "\n", "Input parameters:
\n", "channel,message
\n", "" ] }, { "cell_type": "code", "execution_count": 6, "id": "8cacd129-1fed-4c9e-9f2f-70da41c43c88", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "6a87f83ab0ecfeecb9c98d084e2b1066c26fa64be5b4928d5573a5d60299802d", "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Post Slack Message", "execution_data": { "last_date_success_run_cell": "2022-09-02T16:45:12.785Z" }, "id": 62, "index": 62, "inputData": [ { "channel": { "constant": false, "value": "Channel_Name" }, "message": { "constant": false, "value": "\"New IAM user {}\".format(user_name)" } } ], "inputschema": [ { "properties": { "channel": { "default": "", "description": "Name of the slack channel where the message to be posted", "title": "Channel", "type": "string" }, "message": { "default": "", "description": "Message to be sent", "title": "Message", "type": "string" } }, "required": [ "channel", "message" ], "title": "slack_post_message", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SLACK", "name": "Post Slack Message", "nouns": [ "slack", "message" ], "orderProperties": [ "channel", "message" ], "output": { "type": "" }, "outputParams": { "output_name": "send_status", "output_name_enabled": true }, "printOutput": true, "startcondition": "'User' in UserInfo and not channel", "tags": [ "slack_post_message" ], "verbs": [ "post" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "\n", "from pydantic import BaseModel, Field\n", "from slack_sdk import WebClient\n", "from slack_sdk.errors import SlackApiError\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "\n", "from beartype import beartype\n", 
"@beartype\n", "def slack_post_message_printer(output):\n", " if output is not None:\n", " pprint.pprint(output)\n", " else:\n", " return\n", "\n", "\n", "@beartype\n", "def slack_post_message(\n", " handle: WebClient,\n", " channel: str,\n", " message: str) -> str:\n", "\n", " try:\n", " response = handle.chat_postMessage(\n", " channel=channel,\n", " text=message)\n", " return f\"Successfuly Sent Message on Channel: #{channel}\"\n", " except SlackApiError as e:\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.response['error']}\")\n", " if e.response['error'] == 'channel_not_found':\n", " raise Exception('Channel Not Found')\n", " elif e.response['error'] == 'duplicate_channel_not_found':\n", " raise Exception('Channel associated with the message_id not valid')\n", " elif e.response['error'] == 'not_in_channel':\n", " raise Exception('Cannot post message to channel user is not in')\n", " elif e.response['error'] == 'is_archived':\n", " raise Exception('Channel has been archived')\n", " elif e.response['error'] == 'msg_too_long':\n", " raise Exception('Message text is too long')\n", " elif e.response['error'] == 'no_text':\n", " raise Exception('Message text was not provided')\n", " elif e.response['error'] == 'restricted_action':\n", " raise Exception('Workspace preference prevents user from posting')\n", " elif e.response['error'] == 'restricted_action_read_only_channel':\n", " raise Exception('Cannot Post message, read-only channel')\n", " elif e.response['error'] == 'team_access_not_granted':\n", " raise Exception('The token used is not granted access to the workspace')\n", " elif e.response['error'] == 'not_authed':\n", " raise Exception('No Authtnecition token provided')\n", " elif e.response['error'] == 'invalid_auth':\n", " raise Exception('Some aspect of Authentication cannot be validated. 
Request denied')\n", " elif e.response['error'] == 'access_denied':\n", " raise Exception('Access to a resource specified in the request denied')\n", " elif e.response['error'] == 'account_inactive':\n", " raise Exception('Authentication token is for a deleted user')\n", " elif e.response['error'] == 'token_revoked':\n", " raise Exception('Authentication token for a deleted user has been revoked')\n", " elif e.response['error'] == 'no_permission':\n", " raise Exception('The workspace toekn used does not have necessary permission to send message')\n", " elif e.response['error'] == 'ratelimited':\n", " raise Exception('The request has been ratelimited. Retry sending message later')\n", " elif e.response['error'] == 'service_unavailable':\n", " raise Exception('The service is temporarily unavailable')\n", " elif e.response['error'] == 'fatal_error':\n", " raise Exception('The server encountered catostrophic error while sending message')\n", " elif e.response['error'] == 'internal_error':\n", " raise Exception('The server could not complete operation, likely due to transietn issue')\n", " elif e.response['error'] == 'request_timeout':\n", " raise Exception('Sending message error via POST: either message was missing or truncated')\n", " else:\n", " raise Exception(f'Failed Sending Message to slack channel {channel} Error: {e.response[\"error\"]}')\n", "\n", " except Exception as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.__str__()}\")\n", " return f\"Unable to send message on {channel}\"\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"channel\": \"Channel_Name\",\n", " \"message\": \"\\\\\"New IAM user {}\\\\\".format(user_name)\"\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"'User' in UserInfo and not channel\",\n", " \"condition_result\": true\n", 
" }''')\n", "task.configure(outputName=\"send_status\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(slack_post_message, lego_printer=slack_post_message_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "e9df5398-15b1-4279-92b8-d4c62372afed", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "In this Runbook, we demonstrated the use of unSkript's AWS and slack actions to perform AWS create new IAM user, login profile and also show the caller identity of the user. On Success, post a message on the slack channel about the User creation. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io" ] } ], "metadata": { "execution_data": { "runbook_name": "Create a new AWS IAM User", "parameters": [ "channel", "password", "username" ] }, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3", "version": "3.10.6 (tags/v3.10.6:9c7b4bd, Aug 1 2022, 21:53:49) [MSC v.1932 64 bit (AMD64)]" }, "parameterSchema": { "properties": { "channel": { "description": "Slack Channel Name to send the new User Information. 
Example random, general", "title": "channel", "type": "string" }, "password": { "description": "Login profile password for new IAM user.", "format": "password", "title": "password", "type": "string", "writeOnly": true }, "username": { "description": "Name of the user that needs to be created", "title": "username", "type": "string" } }, "required": [ "username", "password" ], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "e8899eb02dfbc033aab5733bdae1bd213fa031d40331094008e8673d99ebab63" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/Add_new_IAM_user.json ================================================ { "name": "Create a new AWS IAM User", "description": "AWS has an inbuilt identity and access management system known as AWS IAM. IAM supports the concept of users, group, roles and privileges. IAM user is an identity that can be created and assigned some privileges. This runbook can be used to create an AWS IAM User", "uuid": "924025582b6c1b3ea3c8c834f1ee430a2df8bd42c5119191cb5c5da3121f1d18", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/Configure_url_endpoint_on_a_cloudwatch_alarm.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "02550ae3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "Output variable:
\n", "send_status
\n", "
1) Attach a webhook endpoint to AWS Cloudwatch alarm
" ] }, { "cell_type": "markdown", "id": "943a923f", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 1", "orderProperties": [], "tags": [], "title": "Step 1" }, "source": [ "Here we will configure the url endpoint to the SNS associated with a cloudwatch alarm.
\n", "\n", "\n", "This action takes the following parameters:
\n", "alarm_name, region, region
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "60e41cc8-b61f-4104-a41c-f084bce38740", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "e591113f7afc699ee564d67ef912ea2d689acc91d7640a2a05e68c039153bd33", "checkEnabled": false, "collapsed": true, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Attach a webhook endpoint to one of the SNS attached to the AWS Cloudwatch alarm.", "id": 213, "index": 213, "inputschema": [ { "properties": { "alarm_name": { "description": "Cloudwatch alarm name.", "title": "Alarm name", "type": "string" }, "region": { "description": "AWS Region of the cloudwatch.", "title": "Region", "type": "string" }, "url": { "description": "URL where the alarm notification needs to be sent. 
URL should start with http or https.", "title": "URL", "type": "string" } }, "required": [ "alarm_name", "region", "url" ], "title": "aws_cloudwatch_attach_webhook_notification_to_alarm", "type": "object" } ], "isUnskript": false, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Attach a webhook endpoint to AWS Cloudwatch alarm", "nouns": [], "orderProperties": [ "alarm_name", "region", "url" ], "output": { "type": "" }, "printOutput": true, "tags": [ "aws_cloudwatch_attach_webhook_notification_to_alarm" ], "title": "Attach a webhook endpoint to AWS Cloudwatch alarm", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "from pydantic import BaseModel, Field\n", "from unskript.legos.aws.aws_get_handle.aws_get_handle import Session\n", "from urllib.parse import urlparse\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_cloudwatch_attach_webhook_notification_to_alarm_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint({\"Subscription ARN\" : output})\n", "\n", "\n", "@beartype\n", "def aws_cloudwatch_attach_webhook_notification_to_alarm(\n", " hdl: Session,\n", " alarm_name: str,\n", " region: str,\n", " url: str\n", ") -> str:\n", " \"\"\"aws_cloudwatch_attach_webhook_notification_to_alarm returns subscriptionArn\n", "\n", " :type alarm_name: string\n", " :param alarm_name: Cloudwatch alarm name.\n", "\n", " :type url: string\n", " :param url: URL where the alarm notification needs to be sent.\n", "\n", " :type region: string\n", " :param region: AWS Region of the cloudwatch.\n", "\n", " :rtype: Returns subscriptionArn\n", " \"\"\"\n", " cloudwatchClient = hdl.client(\"cloudwatch\", region_name=region)\n", "\n", " # Get the configured SNS(es) to this alarm.\n", " alarmDetail = cloudwatchClient.describe_alarms(\n", " AlarmNames=[alarm_name]\n", " )\n", " if 
alarmDetail is None:\n", " return f'Alarm {alarm_name} not found in AWS region {region}'\n", " # Need to get the AlarmActions from either composite or metric field.\n", " if len(alarmDetail['CompositeAlarms']) > 0:\n", " snses = alarmDetail['CompositeAlarms'][0]['AlarmActions']\n", " else:\n", " snses = alarmDetail['MetricAlarms'][0]['AlarmActions']\n", "\n", " #Pick any sns to configure the url endpoint.\n", " if len(snses) == 0:\n", " return f'No SNS configured for alarm {alarm_name}'\n", "\n", " snsArn = snses[0]\n", " print(f'Configuring url endpoint on SNS {snsArn}')\n", "\n", " snsClient = hdl.client('sns', region_name=region)\n", " # Figure out the protocol from the url\n", " try:\n", " parsedURL = urlparse(url)\n", " except Exception as e:\n", " print(f'Invalid URL {url}, {e}')\n", " raise e\n", "\n", " if parsedURL.scheme != 'http' and parsedURL.scheme != 'https':\n", " return f'Invalid URL {url}'\n", "\n", " protocol = parsedURL.scheme\n", " try:\n", " response = snsClient.subscribe(\n", " TopicArn=snsArn,\n", " Protocol=protocol,\n", " Endpoint=url,\n", " ReturnSubscriptionArn=True)\n", " except Exception as e:\n", " print(f'Subscribe to SNS topic arn {snsArn} failed, {e}')\n", " raise e\n", " subscriptionArn = response['SubscriptionArn']\n", " print(f'URL {url} subscribed to SNS {snsArn}, subscription ARN {subscriptionArn}')\n", " return subscriptionArn\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_cloudwatch_attach_webhook_notification_to_alarm, lego_printer=aws_cloudwatch_attach_webhook_notification_to_alarm_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "93fbb5a1", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action captures the following output:
\n", "None
In this Runbook, we demonstrated the use of unSkript's AWS legos to configure the url endpoint to the SNS associated with a cloudwatch alarm. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Configure URL endpoint on a AWS CloudWatch alarm", "parameters": [ "Region", "URL", "AlarmName" ] }, "kernelspec": { "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "AlarmName": { "description": "Name of the AWS Alarm", "title": "AlarmName", "type": "string" }, "Region": { "description": "AWS Region of the alarm", "title": "Region", "type": "string" }, "URL": { "description": "URL to be attached to the SNS ", "title": "URL", "type": "string" } }, "required": [ "AlarmName", "Region", "URL" ], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/Configure_url_endpoint_on_a_cloudwatch_alarm.json ================================================ { "name": "Configure URL endpoint on a AWS CloudWatch alarm", "description": "Configures the URL endpoint to the SNS associated with a CloudWatch alarm. This allows to external functions to be invoked within unSkript in response to an alert getting generated. 
Alarms can be attached to the handlers to perform data enrichment or remediation", "uuid": "196a6ad5bd13b29d0a3acbf3227b134f7a38777cb1051928f0cb456845c643e0", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/Copy_ami_to_all_given_AWS_regions.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "07894245-58bd-475f-b722-8d7513fbe063", "metadata": { "jupyter": { "source_hidden": false }, "name": "Copy AMI to All Given AWS Regions", "orderProperties": [], "tags": [], "title": "Copy AMI to All Given AWS Regions" }, "source": [ "\n", "
1) Find unused NAT gateways
2) Delete unused NAT gateways
This action fetches all AWS Regions to execute Step 1\ud83d\udc47. This action will only execute if no region is provided.
\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": 7, "id": "8a580cb0-7c57-4c8a-af46-f23f607931fa", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionOutputType": null, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "708ea4af5f8fe7096a15b3a52c4a657606bab9e177386fad7a847341ed607d64", "collapsed": true, "condition_enabled": true, "credentialsJson": {}, "description": "List all available AWS Regions", "execution_data": { "last_date_success_run_cell": "2023-04-18T14:15:19.579Z" }, "id": 1, "index": 1, "inputschema": [ { "properties": {}, "title": "aws_list_all_regions", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "metadata": { "action_bash_command": false, "action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS" ], "action_description": "List all available AWS Regions", "action_entry_function": "aws_list_all_regions", "action_is_check": false, "action_is_remediation": false, "action_needs_credential": true, "action_next_hop": null, "action_next_hop_parameter_mapping": null, "action_nouns": [ "regions", "aws" ], "action_output_type": "ACTION_OUTPUT_TYPE_LIST", "action_supports_iteration": true, "action_supports_poll": true, "action_title": "AWS List All Regions", "action_type": "LEGO_TYPE_AWS", "action_verbs": [ "list" ], "action_version": "1.0.0" }, "name": "AWS List All Regions", "orderProperties": [], "outputParams": { "output_name": "region", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not region", "tags": [ "aws_list_all_regions" ], "uuid": "708ea4af5f8fe7096a15b3a52c4a657606bab9e177386fad7a847341ed607d64", "version": "1.0.0" }, "outputs": [], "source": [ "#\n", "# Copyright 
(c) 2021 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "from typing import Dict, List\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_list_all_regions_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_list_all_regions(handle) -> List:\n", " \"\"\"aws_list_all_regions lists all the AWS regions\n", "\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :rtype: Result List of result\n", " \"\"\"\n", "\n", " result = handle.aws_cli_command(\"aws ec2 --region us-west-2 describe-regions --all-regions --query 'Regions[].{Name:RegionName}' --output text\")\n", " if result is None or result.returncode != 0:\n", " print(\"Error while executing command : {}\".format(result))\n", " return str()\n", " result_op = list(result.stdout.split(\"\\n\"))\n", " list_region = [x for x in result_op if x != '']\n", " return list_region\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not region\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"region\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_list_all_regions, lego_printer=aws_list_all_regions_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "2020e8d0-ba3b-4c71-84b2-10917465a27e", "metadata": { "name": "Step 1", "orderProperties": [], "tags": [], "title": "Step 1" }, "source": [ "This action captures the following ouput:
\n", "region
Using unSkript's AWS Find Unused NAT Gateways action, we will find unused gateways given a threshold number of days from the metric ActiveConnectionCount. If the metric gives an empty result, we consider the NAT Gateway to be unused in the last x days.
\n", "\n", "\n", "This action takes the following parameters:
\n", "region, threhold_days
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "73c6c8db-6fca-4f7b-9fa8-a2f57da9b2c1", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionOutputType": null, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "0f0c137beaf6a9246508393d1e868cea529d30a88631cd0f321799acbfbd47bb", "collapsed": true, "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "This action to get all of the Nat gateways that have zero traffic over those", "id": 4, "index": 4, "inputData": [ { "number_of_days": { "constant": false, "value": "int(threshold_days)" }, "region": { "constant": false, "value": "iter_item" } } ], "inputschema": [ { "properties": { "number_of_days": { "description": "Number of days to check the Datapoints.", "title": "Number of Days", "type": "integer" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [], "title": "aws_filter_unused_nat_gateway", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "region", "iter_list": { "constant": false, "objectItems": false, "value": "region" } } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "metadata": { "action_bash_command": false, "action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_NAT_GATEWAY", "CATEGORY_TYPE_AWS_EC2" ], "action_description": "This action to get all of the Nat gateways that have zero traffic over those", "action_entry_function": "aws_filter_unused_nat_gateway", "action_is_check": true, "action_is_remediation": false, "action_needs_credential": true, "action_next_hop": [], "action_next_hop_parameter_mapping": {}, "action_nouns": null, "action_output_type": "ACTION_OUTPUT_TYPE_LIST", "action_supports_iteration": 
true, "action_supports_poll": true, "action_title": "AWS Find Unused NAT Gateways", "action_type": "LEGO_TYPE_AWS", "action_verbs": null, "action_version": "1.0.0" }, "name": "AWS Find Unused NAT Gateways", "orderProperties": [ "region", "number_of_days" ], "outputParams": { "output_name": "unused_gateways", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not nat_gateway_ids", "tags": [ "aws_filter_unused_nat_gateway" ], "uuid": "0f0c137beaf6a9246508393d1e868cea529d30a88631cd0f321799acbfbd47bb", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from datetime import datetime, timedelta\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_unused_nat_gateway_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "def is_nat_gateway_used(handle, nat_gateway, start_time, end_time,number_of_days):\n", " datapoints = []\n", " if nat_gateway['State'] != 'deleted':\n", " # Get the metrics data for the specified NAT Gateway over the last 7 days\n", " metrics_data = handle.get_metric_statistics(\n", " Namespace='AWS/NATGateway',\n", " MetricName='ActiveConnectionCount',\n", " Dimensions=[\n", " {\n", " 'Name': 'NatGatewayId',\n", " 'Value': nat_gateway['NatGatewayId']\n", " },\n", " ],\n", " StartTime=start_time,\n", " EndTime=end_time,\n", " Period=86400*number_of_days,\n", " Statistics=['Sum']\n", " )\n", " datapoints += metrics_data['Datapoints']\n", " if len(datapoints) == 0 or metrics_data['Datapoints'][0]['Sum']==0:\n", " return False\n", " else:\n", " return True\n", "\n", "\n", "@beartype\n", "def aws_filter_unused_nat_gateway(handle, 
number_of_days: int = 7, region: str = \"\") -> Tuple:\n", " \"\"\"aws_get_natgateway_by_vpc Returns an array of NAT gateways.\n", "\n", " :type region: string\n", " :param region: Region to filter NAT Gateways.\n", "\n", " :type number_of_days: int\n", " :param number_of_days: Number of days to check the Datapoints.\n", "\n", " :rtype: Array of NAT gateways.\n", " \"\"\"\n", " result = []\n", " end_time = datetime.utcnow()\n", " start_time = end_time - timedelta(days=number_of_days)\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " ec2Client = handle.client('ec2', region_name=reg)\n", " cloudwatch = handle.client('cloudwatch', region_name=reg)\n", " response = ec2Client.describe_nat_gateways()\n", " for nat_gateway in response['NatGateways']:\n", " nat_gateway_info = {}\n", " if not is_nat_gateway_used(cloudwatch, nat_gateway, start_time, end_time,number_of_days):\n", " nat_gateway_info[\"nat_gateway_id\"] = nat_gateway['NatGatewayId']\n", " nat_gateway_info[\"reg\"] = reg\n", " result.append(nat_gateway_info)\n", " except Exception as e:\n", " pass\n", "\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"number_of_days\": \"int(threshold_days)\",\n", " \"region\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"region\",\n", " \"iter_parameter\": \"region\"\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not nat_gateway_ids\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"unused_gateways\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = 
task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_unused_nat_gateway, lego_printer=aws_filter_unused_nat_gateway_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "a311041f-620a-4b6b-914f-e52c6c3a71f4", "metadata": { "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following output:
\n", "unused_gateways
This action filters regions that have no unused gateways and creates a list of those that have them.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "b85ce542-bdf0-44d2-9e75-213002d5c036", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of Unused NAT Gateways", "orderProperties": [], "tags": [], "title": "Create List of Unused NAT Gateways" }, "outputs": [], "source": [ "all_unused_gateways = []\n", "dummy = []\n", "try:\n", " for reg,res in unused_gateways.items():\n", " if res[0]==False:\n", " if len(res[1])!=0:\n", " dummy = res[1]\n", " for x in dummy:\n", " all_unused_gateways.append(x)\n", "except Exception:\n", " for nat_id in nat_gateway_ids:\n", " data_dict = {}\n", " data_dict[\"reg\"] = region[0]\n", " data_dict[\"nat_gateway_id\"] = nat_id\n", " all_unused_gateways.append(data_dict)\n", "print(all_unused_gateways)" ] }, { "cell_type": "markdown", "id": "9fb3704a-9b19-49c4-96ab-a982217bbcd3", "metadata": { "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following output:
\n", "all_unused_gateways
This action deletes the unused NAT gateways found in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "84d27641-52db-4efc-9cb7-e52995729c2f", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionOutputType": null, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "c24c20b1d1d8a9f31ddbf6f2adf96cbd37df3a0fcf99e4a9a85b1f8b897ad8d4", "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "AWS Delete NAT Gateway", "id": 2, "index": 2, "inputData": [ { "nat_gateway_id": { "constant": false, "value": "\"iter.get(\\\\\"nat_gateway_id\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"reg\\\\\")\"" } } ], "inputschema": [ { "properties": { "nat_gateway_id": { "description": "ID of the NAT Gateway.", "title": "NAT Gateway ID", "type": "string" }, "region": { "description": "AWS Region of the bucket.", "title": "Region", "type": "string" } }, "required": [ "nat_gateway_id", "region" ], "title": "aws_delete_nat_gateway", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "nat_gateway_id": "nat_gateway_id", "region": "reg" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_unused_gateways" } } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_AWS", "metadata": { "action_bash_command": false, "action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS" ], "action_description": "AWS Delete NAT Gateway", "action_entry_function": "aws_delete_nat_gateway", "action_is_check": false, "action_is_remediation": false, "action_needs_credential": true, "action_next_hop": null, "action_next_hop_parameter_mapping": null, "action_nouns": null, "action_output_type": "ACTION_OUTPUT_TYPE_DICT", "action_supports_iteration": true, "action_supports_poll": true, "action_title": "AWS Delete NAT Gateway", "action_type": 
"LEGO_TYPE_AWS", "action_verbs": null, "action_version": "1.0.0" }, "name": "AWS Delete NAT Gateway", "orderProperties": [ "nat_gateway_id", "region" ], "printOutput": true, "startcondition": "len(all_unused_gateways)!=0", "tags": [ "aws_delete_nat_gateway" ], "title": "AWS Delete NAT Gateway", "uuid": "c24c20b1d1d8a9f31ddbf6f2adf96cbd37df3a0fcf99e4a9a85b1f8b897ad8d4", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_delete_nat_gateway_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_delete_nat_gateway(handle, nat_gateway_id: str, region: str) -> Dict:\n", " \"\"\"aws_delete_nat_gateway Returns an dict of NAT gateways information.\n", "\n", " :type region: string\n", " :param region: Region to filter instances.\n", "\n", " :type nat_gateway_id: string\n", " :param nat_gateway_id: ID of the NAT Gateway.\n", "\n", " :rtype: dict of NAT gateways information.\n", " \"\"\"\n", " try:\n", " ec2Client = handle.client('ec2', region_name=region)\n", " response = ec2Client.delete_nat_gateway(NatGatewayId=nat_gateway_id)\n", " return response\n", " except Exception as e:\n", " raise Exception(e)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"nat_gateway_id\": \"iter.get(\\\\\"nat_gateway_id\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"reg\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_unused_gateways\",\n", " \"iter_parameter\": [\"nat_gateway_id\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": 
\"len(all_unused_gateways)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_delete_nat_gateway, lego_printer=aws_delete_nat_gateway_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "9c7430c8-3660-45bd-90ef-9ceab77e3daa", "metadata": { "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "region, nat_gateway_id
In this Runbook, we were able to filter unused NAT Gateways given a threshold number of days and delete them. This runbook enables us to save costs as AWS charges us based on the number of hours the gateway was available and the data (GB) it processes. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Delete Unused AWS NAT Gateways", "parameters": null }, "kernelspec": { "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "outputParameterSchema": { "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterSchema": { "properties": { "nat_gateway_ids": { "description": "List of NAT Gateway ID's. ", "title": "nat_gateway_ids", "type": "array" }, "region": { "description": "AWS Region(s) to search for unused NAT Gateway. Eg: [\"us-west-2\",\"ap-south-1\"]", "title": "region", "type": "array" }, "threshold_days": { "default": 7, "description": "Threshold number of days to check if a NAT Gateway was used.", "title": "threshold_days", "type": "number" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/Delete_Unused_AWS_NAT_Gateways.json ================================================ { "name": "Delete Unused AWS NAT Gateways", "description": "This runbook can be used to identify and remove any unused NAT Gateways. This allows us to adhere to best practices and avoid unnecessary costs. NAT gateways are used to connect a private instance with outside networks. 
When a NAT gateway is provisioned, AWS charges you based on the number of hours it was available and the data (GB) it processes.", "uuid": "26da5206a0a18b30a83f9a72e0dc61408237920bf84831165974610c79875bfb", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/Detach_Instance_from_ASG.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "9a175295-d9f6-47f1-bab9-c4b9d6cdf375", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "1. Get Unhealthy instances from ASG
\n", "2. AWS Detach Instances From AutoScaling Group
" ] }, { "cell_type": "code", "execution_count": 11, "id": "d4246eb1-a222-4926-8d78-39ed59991674", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-08T13:09:35.318Z" }, "jupyter": { "source_hidden": true }, "name": "Input Verification", "orderProperties": [], "tags": [], "title": "Input Verification", "credentialsJson": {} }, "outputs": [], "source": [ "if instance_id and not region:\n", " raise SystemExit(\"Provide region for the instance!\")" ] }, { "attachments": {}, "cell_type": "markdown", "id": "3125e39b-1f1a-4927-b0ad-8589898dce2e", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 A", "orderProperties": [], "tags": [], "title": "Step-1 A" }, "source": [ "Using unSkript's Get AWS AutoScaling Group Instances action we list all the EC2 instances for a given region with Auto Scaling Group name. This action only executes if the instance_id and region have been given as parameters.
\n", "instance_ids, regionasg_instanceIn this action, we list all the available regions from AWS if the user does not provide a region as a parameter.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 4, "id": "09ac66fd-9282-4e66-b899-23577859adcb", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "708ea4af5f8fe7096a15b3a52c4a657606bab9e177386fad7a847341ed607d64", "condition_enabled": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "List all available AWS Regions", "execution_data": { "last_date_success_run_cell": "2023-02-08T13:17:57.248Z" }, "id": 215, "index": 215, "inputschema": [ { "properties": {}, "title": "aws_list_all_regions", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS List All Regions", "nouns": [ "regions", "aws" ], "orderProperties": [], "output": { "type": "" }, "outputParams": { "output_name": "region", "output_name_enabled": true }, "printOutput": true, "startcondition": "not region", "tags": [ "aws_list_all_regions" ], "verbs": [ "list" ] }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2021 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "from typing import Dict, List\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_list_all_regions_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_list_all_regions(handle) -> List:\n", " \"\"\"aws_list_all_regions lists all the AWS regions\n", "\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :rtype: Result List of result\n", " \"\"\"\n", "\n", " result = handle.aws_cli_command(\"aws ec2 describe-regions --all-regions --query 'Regions[].{Name:RegionName}' --output text\")\n", " if result is None or result.returncode != 
0:\n", " print(\"Error while executing command : {}\".format(result))\n", " return str()\n", " result_op = list(result.stdout.split(\"\\n\"))\n", " list_region = [x for x in result_op if x != '']\n", " return list_region\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not region\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"region\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_list_all_regions, lego_printer=aws_list_all_regions_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "561775c0-545a-4ca2-9c79-11b919f7dac0", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 B", "orderProperties": [], "tags": [], "title": "Step-1 B" }, "source": [ "Output variable:
\n", "region
Here we will use unSkript Get Unhealthy instances from ASG action. This action filters all the unhealthy instances from the Auto Scaling Group. It will execute if the instance_id parameter is not given.
\n", "\n", "Input parameters:
\n", "region
\n", "" ] }, { "cell_type": "code", "execution_count": 5, "id": "28d0cedd-44e9-4deb-abc3-5e05442a46a9", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "5de92ab7221455580796b1ebe93c61e3fec51d5dac22e907f96b6e0d7564e0ad", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Get Unhealthy instances from Auto Scaling Group", "execution_data": { "last_date_success_run_cell": "2023-02-08T13:18:20.322Z" }, "id": 172, "index": 172, "inputData": [ { "region": { "constant": false, "value": "iter_item" } } ], "inputschema": [ { "properties": { "region": { "default": "", "description": "AWS Region of the ASG.", "title": "Region", "type": "string" } }, "title": "aws_filter_unhealthy_instances_from_asg", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "region", "iter_list": { "constant": false, "objectItems": false, "value": "region" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Get Unhealthy instances from ASG", "nouns": [], "orderProperties": [ "region" ], "output": { "type": "" }, "outputParams": { "output_name": "unhealthy_instance", "output_name_enabled": true }, "printOutput": true, "startcondition": "not instance_id", "tags": [ "aws_filter_unhealthy_instances_from_asg" ], "title": "Get Unhealthy instances from ASG", "verbs": [] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import Optional, Tuple\n", "from pydantic import BaseModel, Field\n", "from unskript.legos.utils import CheckOutput, CheckOutputStatus\n", "from unskript.connectors.aws import aws_get_paginator\n", "from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import 
aws_list_all_regions\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_filter_unhealthy_instances_from_asg_printer(output):\n", " if output is None:\n", " return\n", "\n", " if isinstance(output, CheckOutput):\n", " print(output.json())\n", " else:\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_filter_unhealthy_instances_from_asg(handle, region: str = \"\") -> CheckOutput:\n", " \"\"\"aws_filter_unhealthy_instances_from_asg gives unhealthy instances from ASG\n", "\n", " :type region: string\n", " :param region: AWS region.\n", "\n", " :rtype: CheckOutput with status result and list of unhealthy instances from ASG.\n", " \"\"\"\n", " result = []\n", " all_regions = [region]\n", " if not region:\n", " all_regions = aws_list_all_regions(handle)\n", "\n", " for reg in all_regions:\n", " try:\n", " asg_client = handle.client('autoscaling', region_name=reg)\n", " response = aws_get_paginator(asg_client, \"describe_auto_scaling_instances\", \"AutoScalingInstances\")\n", "\n", " # filter instances to only include those that are in an \"unhealthy\" state\n", " for instance in response:\n", " data_dict = {}\n", " if instance['HealthStatus'] == 'Unhealthy':\n", " data_dict[\"InstanceId\"] = instance[\"InstanceId\"]\n", " data_dict[\"AutoScalingGroupName\"] = instance[\"AutoScalingGroupName\"]\n", " data_dict[\"region\"] = reg\n", " result.append(data_dict)\n", "\n", " except Exception as e:\n", " pass\n", "\n", " if len(result) != 0:\n", " return CheckOutput(status=CheckOutputStatus.FAILED,\n", " objects=result,\n", " error=str(\"\"))\n", " else:\n", " return CheckOutput(status=CheckOutputStatus.SUCCESS,\n", " objects=result,\n", " error=str(\"\"))\n", "\n", "\n", "\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " 
\"iter_list_is_const\": false,\n", " \"iter_list\": \"region\",\n", " \"iter_parameter\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not instance_id\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"unhealthy_instance\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_unhealthy_instances_from_asg, lego_printer=aws_filter_unhealthy_instances_from_asg_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "32d0f938-ad56-453c-89be-52c139228017", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "unhealthy_instance
In this action, we modify the output from step 1 A and step 1 B to return a list of dictionary items for the unhealthy instances from ASG.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 12, "id": "e47022b7-ec19-4149-a7a7-3e2ebde54f87", "metadata": { "collapsed": true, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-08T13:23:56.168Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Modify Output", "orderProperties": [], "tags": [], "title": "Modify Output", "credentialsJson": {} }, "outputs": [], "source": [ "detach_instance_list = []\n", "try:\n", " for k, v in asg_instance.items():\n", " for i in v:\n", " detach_instance_list.append(i)\n", "except Exception as e:\n", " if unhealthy_instance and not asg_name:\n", " for k, v in unhealthy_instance.items():\n", " if v.status == CheckOutputStatus.FAILED:\n", " for instance in v.objects:\n", " detach_instance_list.append(instance)\n", " else:\n", " for k, v in unhealthy_instance.items():\n", " if v.status == CheckOutputStatus.FAILED:\n", " for instance in v.objects:\n", " if asg_name in instance[\"AutoScalingGroupName\"]:\n", " detach_instance_list.append(instance)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "614ed424-9394-449e-9dc6-5547f765470a", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable: detach_instance_list
\n", "
In this action, we detach the AWS unhealthy instances from the Auto Scaling Group which we get from step 1.
\n", "\n", "\n", "Input parameters:
\n", "instance_ids, group_name, region
\n", "" ] }, { "cell_type": "code", "execution_count": 10, "id": "95603003-ac39-493a-af8a-f1910784a6f2", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "8e6e08f606d40e2f4481128d356cc67d30be72349074c513627b3f03a178cf6e", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Use This Action to AWS Detach Instances From AutoScaling Group", "id": 284, "index": 284, "inputData": [ { "group_name": { "constant": false, "value": "\"iter.get(\\\\\"AutoScalingGroupName\\\\\")\"" }, "instance_ids": { "constant": false, "value": "\"iter.get(\\\\\"InstanceId\\\\\")\"" }, "region": { "constant": false, "value": "\"iter.get(\\\\\"region\\\\\")\"" } } ], "inputschema": [ { "properties": { "group_name": { "description": "Name of AutoScaling Group.", "title": "Group Name", "type": "string" }, "instance_ids": { "description": "List of instances.", "title": "Instance IDs", "type": "string" }, "region": { "description": "AWS Region of autoscaling group.", "title": "Region", "type": "string" } }, "required": [ "instance_ids", "group_name", "region" ], "title": "aws_detach_autoscaling_instances", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "group_name": "AutoScalingGroupName", "instance_ids": "InstanceId", "region": "region" }, "iter_list": { "constant": false, "objectItems": true, "value": "detach_instance_list" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Detach Instances From AutoScaling Group", "nouns": [], "orderProperties": [ "instance_ids", "group_name", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "detach_output", "output_name_enabled": true }, "printOutput": true, "startcondition": 
"len(detach_instance_list)>0", "tags": [ "aws_detach_autoscaling_instances" ], "title": "AWS Detach Instances From AutoScaling Group", "verbs": [] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import List, Dict\n", "from pydantic import BaseModel, Field\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_detach_autoscaling_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_detach_autoscaling_instances(\n", " handle,\n", " instance_ids: str,\n", " group_name: str,\n", " region: str\n", ") -> Dict:\n", " \"\"\"aws_detach_autoscaling_instances detach instances from autoscaling group.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type instance_ids: string\n", " :param instance_ids: Name of instances.\n", "\n", " :type group_name: string\n", " :param group_name: Name of AutoScaling Group.\n", "\n", " :type region: string\n", " :param region: AWS Region of autoscaling group.\n", "\n", " :rtype: Dict with the detach instance info.\n", " \"\"\"\n", "\n", " ec2Client = handle.client(\"autoscaling\", region_name=region)\n", " result = {}\n", " try:\n", " response = ec2Client.detach_instances(\n", " InstanceIds=[instance_ids],\n", " AutoScalingGroupName=group_name,\n", " ShouldDecrementDesiredCapacity=True\n", " )\n", " result = response\n", " except Exception as error:\n", " result[\"error\"] = error\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"group_name\": \"iter.get(\\\\\"AutoScalingGroupName\\\\\")\",\n", " \"instance_ids\": \"iter.get(\\\\\"InstanceId\\\\\")\",\n", " \"region\": \"iter.get(\\\\\"region\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " 
\"iter_list_is_const\": false,\n", " \"iter_list\": \"detach_instance_list\",\n", " \"iter_parameter\": [\"instance_ids\",\"group_name\",\"region\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(detach_instance_list)>0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(outputName=\"detach_output\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_detach_autoscaling_instances, lego_printer=aws_detach_autoscaling_instances_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "346d8d07-6708-4663-bf8c-5d17c8b6506f", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "Output variable:
\n", "detach_output
In this Runbook, we demonstrated the use of unSkript's AWS actions. This runbook helps to detach the instances from the Auto Scaling Group. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Detach EC2 Instance from ASG", "parameters": [ "asg_name", "instance_id", "region" ] }, "kernelspec": { "display_name": "unSkript (Build: 839)", "name": "python_kubernetes" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3" }, "parameterSchema": { "properties": { "asg_name": { "description": "Auto Scaling Group Name. Note: if ASG name is given no need to give region.", "title": "asg_name", "type": "string" }, "instance_id": { "description": "Instance Ids that are attached to Auto Scaling Group. Note: if instance id is given then the region is mandatory.", "title": "instance_id", "type": "array" }, "region": { "description": "AWS region e.g.[\"us-west-2\"]", "title": "region", "type": "array" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": null }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/Detach_Instance_from_ASG.json ================================================ { "name": "Detach EC2 Instance from ASG", "description": "This runbook can be used to detach an instance from Auto Scaling Group. You can remove (detach) an instance that is in the Service state from an Auto Scaling group. After the instance is detached, you can manage it independently from the rest of the Auto Scaling group. By detaching an instance, you can move an instance out of one Auto Scaling group and attach it to a different group. 
For more information, see Attach EC2 instances to your Auto Scaling group.", "uuid": "5ef84b8b1ddc1b41112bc18d14fdda95535f0b271a31232c821f7b56753b77fd", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/Detect_ECS_failed_deployment.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "111d99a7", "metadata": {}, "source": [ "\n", "
IAM Access should follow the policy of least privilege. This means that the credentials give "exactly, enough" access to perform the required task, but no more. That way, if the credentials were ever to be compromised, the blast radius is minimized.
\n", "This RunBook will take an active IAM profile, and analyze its access over the last <threshold> hours. Using CloudTrail logs, we can determine what was accessed, and create a new IAM profile that gives access to just these features.
\n", "\n", "
You will need two IAM accounts to complete this Runbook:
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "3f89abe5-c88d-41b9-a7fa-4bb909c9282f", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "856a647c-33c8-4c2a-8a29-33d9fb6cfacd", "checkEnabled": false, "continueOnError": false, "createTime": "2023-03-24T10:38:24Z", "credentialsJson": {}, "currentVersion": "v0.0.0", "customCell": true, "description": "Describe all CloudTrail Logs in a Region", "execution_data": { "last_date_success_run_cell": "2023-04-13T21:50:22.735Z" }, "id": 100353, "index": 100353, "inputData": [ { "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "region": { "default": "\"us-west-2\"", "description": "AWS Region", "required": true, "title": "region", "type": "string" } }, "required": [ "region" ], "title": "aws_describe_cloudtrail", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Describe Cloudtrails ", "nouns": [], "orderProperties": [ "region" ], "output": { "type": "" }, "printOutput": true, "tags": [], "title": "AWS Describe Cloudtrails ", "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field, SecretStr\n", "from typing import Dict, List\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_describe_cloudtrail_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def aws_describe_cloudtrail(handle, region:str) -> Dict:\n", " # Create a client object for CloudTrail\n", 
" cloudtrail_client = handle.client('cloudtrail', region_name=region)\n", "\n", " # Use the describe_trails method to get information about the available trails\n", " trails = cloudtrail_client.describe_trails()\n", "\n", "\n", " return trails\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_describe_cloudtrail, lego_printer=aws_describe_cloudtrail_printer, hdl=hdl, args=args)" ], "output": {} }, { "cell_type": "code", "execution_count": null, "id": "e18563ce-5ea1-44c8-b903-5143129ae002", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "8f56b9e753e57065e02e107dfd472df3e3b6e3440bd8156f37dc752a1f337909", "checkEnabled": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "List all AWS IAM Users", "execution_data": { "last_date_success_run_cell": "2023-04-13T21:50:27.772Z" }, "id": 265, "index": 265, "inputschema": [ { "properties": {}, "title": "aws_list_all_iam_users", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS List All IAM Users", "nouns": [ "users", "iam", "aws" ], "orderProperties": [], "output": { "type": "" }, "printOutput": true, "tags": [ "aws_list_all_iam_users" ], "verbs": [ "list" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field, SecretStr\n", "from typing import Dict, List\n", "import 
pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_list_all_iam_users_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def aws_list_all_iam_users(handle) -> List:\n", " \"\"\"aws_list_all_iam_users lists all the IAM users\n", "\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :rtype: Result List of all IAM users\n", " \"\"\"\n", " client = handle.client('iam')\n", " users_list=[]\n", " response = client.list_users()\n", " try:\n", " for x in response['Users']:\n", " users_list.append(x['UserName'])\n", " except Exception as e:\n", " users_list.append(e)\n", " return users_list\n", "\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_list_all_iam_users, lego_printer=aws_list_all_iam_users_printer, hdl=hdl, args=args)" ], "output": {} }, { "cell_type": "code", "execution_count": null, "id": "ee69210c-1492-43e0-8bf9-3a6a046fb1a2", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": true, "action_uuid": "306d6f72-62a4-4313-abc0-4dab0e8d5442", "checkEnabled": false, "continueOnError": false, "createTime": "2023-03-24T10:37:02Z", "credentialsJson": {}, "currentVersion": "v0.0.0", "customCell": true, "description": "Using the Access Analyzer, use an existing IAM profile and track it's usage over a period of time. 
Generate a credentials profile based on that usage.", "execution_data": { "last_date_success_run_cell": "2023-04-14T17:02:12.053Z" }, "id": 100352, "index": 100352, "inputData": [ { "AccessRole": { "constant": false, "value": "\"arn:aws:iam::100498623390:role/service-role/AccessAnalyzerMonitorServiceRole_CTBKDXMCCK\"" }, "CloudTrailARN": { "constant": false, "value": "CloudTrailArn" }, "IAMPrincipalARN": { "constant": false, "value": "reference_iam_arn" }, "hours": { "constant": false, "value": "24" }, "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "AccessRole": { "default": "\"arn:aws:iam::100498623390:role/service-role/AccessAnalyzerMonitorServiceRole_CTBKDXMCCK\"", "description": "Access Role that can query the CloudTrail Logs", "required": true, "title": "AccessRole", "type": "string" }, "CloudTrailARN": { "default": "", "description": "Cloud Trail ARN", "required": true, "title": "CloudTrailARN", "type": "string" }, "IAMPrincipalARN": { "default": "", "description": "IAM ARN we are copying the profile into.", "required": true, "title": "IAMPrincipalARN", "type": "string" }, "hours": { "default": 24, "description": "Hours of data to examine", "required": true, "title": "hours", "type": "number" }, "region": { "default": "\"us-west-2\"", "description": "AWS Region", "required": true, "title": "region", "type": "string" } }, "required": [ "AccessRole", "CloudTrailARN", "IAMPrincipalARN", "hours", "region" ], "title": "AWS_Start_IAM_Policy_Generation", "type": "object" } ], "isUnskript": false, "legotype": "LEGO_TYPE_AWS", "name": "AWS Start IAM Policy Generation ", "nouns": [], "orderProperties": [ "region", "CloudTrailARN", "IAMPrincipalARN", "AccessRole", "hours" ], "output": { "type": "" }, "outputParams": { "output_name": "jobId", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "tags": [], "title": "AWS Start IAM Policy Generation ", "verbs": [] }, 
"outputs": [], "source": [ "##\n", "# Copyright (c) 2023 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field, SecretStr\n", "from typing import Dict, List\n", "import pprint\n", "from datetime import datetime, timedelta\n", "\n", "from beartype import beartype\n", "@beartype\n", "def AWS_Start_IAM_Policy_Generation_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def AWS_Start_IAM_Policy_Generation(handle, region:str, CloudTrailARN:str, IAMPrincipalARN:str, AccessRole:str, hours:float) -> str:\n", "\n", " client = handle.client('accessanalyzer', region_name=region)\n", " policyGenerationDict = {'principalArn': IAMPrincipalARN}\n", " myTrail = {'cloudTrailArn': CloudTrailARN,\n", " 'regions': [region],\n", " 'allRegions': False\n", " }\n", " endTime = datetime.now()\n", " endTime = endTime.strftime(\"%Y-%m-%dT%H:%M:%S\")\n", " startTime = datetime.now()- timedelta(hours =hours)\n", " startTime =startTime.strftime(\"%Y-%m-%dT%H:%M:%S\")\n", " response = client.start_policy_generation( \n", " policyGenerationDetails=policyGenerationDict,\n", " cloudTrailDetails={\n", " 'trails': [myTrail],\n", " 'accessRole': AccessRole,\n", " 'startTime': startTime,\n", " 'endTime': endTime\n", " }\n", " )\n", " jobId = response['jobId']\n", " return jobId\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(inputParamsJson='''{\n", " \"AccessRole\": \"\\\\\"arn:aws:iam::100498623390:role/service-role/AccessAnalyzerMonitorServiceRole_CTBKDXMCCK\\\\\"\",\n", " \"CloudTrailARN\": \"CloudTrailArn\",\n", " \"IAMPrincipalARN\": \"reference_iam_arn\",\n", " \"hours\": \"float(24)\",\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(outputName=\"jobId\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(AWS_Start_IAM_Policy_Generation, lego_printer=AWS_Start_IAM_Policy_Generation_printer, 
hdl=hdl, args=args)" ], "output": {} }, { "cell_type": "code", "execution_count": null, "id": "14a19146-ef56-4643-8d21-8e742c0beb6e", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "5548a22f-4669-4ad2-9f61-268507f818c7", "checkEnabled": false, "continueOnError": false, "createTime": "2023-03-24T10:41:12Z", "credentialsJson": {}, "currentVersion": "v0.0.0", "customCell": true, "description": "Once an Access Policy has been generated, this Action retrieves the policy.", "execution_data": { "last_date_success_run_cell": "2023-04-17T13:16:02.591Z" }, "id": 100355, "index": 100355, "inputData": [ { "jobId": { "constant": false, "value": "jobId" }, "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "jobId": { "default": "", "description": "Policy JobId", "required": true, "title": "jobId", "type": "string" }, "region": { "default": "\"us-west-2\"", "description": "region", "required": true, "title": "region", "type": "string" } }, "required": [ "jobId", "region" ], "title": "aws_get_generated_policy", "type": "object" } ], "isUnskript": false, "legotype": "LEGO_TYPE_AWS", "name": "AWS Get Generated Policy", "nouns": [], "orderProperties": [ "region", "jobId" ], "output": { "type": "" }, "outputParams": { "output_name": "generatedPolicy", "output_name_enabled": true }, "printOutput": true, "tags": [], "title": "AWS Get Generated Policy", "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2023 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field, SecretStr\n", "from typing import Dict, List\n", "import pprint\n", "\n", "from 
beartype import beartype\n", "@beartype\n", "def aws_get_generated_policy_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def aws_get_generated_policy(handle, region:str,jobId:str) -> Dict:\n", " client = handle.client('accessanalyzer', region_name=region)\n", " response = client.get_generated_policy(\n", " jobId=jobId,\n", " includeResourcePlaceholders=True,\n", " includeServiceLevelTemplate=True\n", " )\n", " result = {}\n", " result['generatedPolicyResult'] = response['generatedPolicyResult']\n", " result['generationStatus'] = response['jobDetails']['status']\n", " return result\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(inputParamsJson='''{\n", " \"jobId\": \"jobId\",\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(outputName=\"generatedPolicy\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_get_generated_policy, lego_printer=aws_get_generated_policy_printer, hdl=hdl, args=args)" ], "output": {} }, { "cell_type": "code", "execution_count": null, "id": "44929a58-d92e-4c2f-b3f4-a730ab7aed92", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-17T13:16:49.500Z" }, "orderProperties": [], "tags": [], "credentialsJson": {} }, "outputs": [], "source": [ "print(generatedPolicy['generationStatus'])\n", "\n" ], "output": {} }, { "cell_type": "code", "execution_count": null, "id": "b6f9b8b8-de1b-4e64-8e3d-23cb7ee71bed", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": 
"8f56b9e753e57065e02e107dfd472df3e3b6e3440bd8156f37dc752a1f337909", "checkEnabled": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "customCell": true, "description": "List all AWS IAM Users", "execution_data": { "last_date_success_run_cell": "2023-04-13T21:55:05.777Z" }, "id": 265, "index": 265, "inputschema": [ { "properties": {}, "required": [], "title": "aws_get_acount_number", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Get AWS Account Number", "nouns": [ "users", "iam", "aws" ], "orderProperties": [], "output": { "type": "" }, "outputParams": { "output_name": "accountNumber", "output_name_enabled": true }, "printOutput": true, "tags": [ "aws_list_all_iam_users" ], "title": "AWS Get AWS Account Number", "verbs": [ "list" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field, SecretStr\n", "from typing import Dict, List\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_acount_number_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def aws_get_acount_number(handle) -> str:\n", " # Create a client object for the AWS Identity and Access Management (IAM) service\n", " iam_client = handle.client('iam')\n", "\n", " # Call the get_user() method to get information about the current user\n", " response = iam_client.get_user()\n", "\n", " # Extract the account ID from the ARN (Amazon Resource Name) of the user\n", " account_id = response['User']['Arn'].split(':')[4]\n", "\n", " # Print the account ID\n", " return account_id\n", "\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(outputName=\"accountNumber\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " 
task.execute(aws_get_acount_number, lego_printer=aws_get_acount_number_printer, hdl=hdl, args=args)" ], "output": {} }, { "cell_type": "code", "execution_count": null, "id": "5be8f9ec-7591-4a70-b87d-be9bcddd070b", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-04-17T13:16:57.042Z" }, "name": "clean up policy", "orderProperties": [], "tags": [], "title": "clean up policy", "credentialsJson": {} }, "outputs": [], "source": [ "import json\n", "import re \n", "\n", "\n", "policy = generatedPolicy['generatedPolicyResult']['generatedPolicies'][0]['policy']\n", "#print(policy)\n", "\n", "policy = json.dumps(policy)\n", "\n", "policy = policy.replace('${Region}', \"us-west-2\")\n", "policy = policy.replace('${Account}', accountNumber)\n", "policy = re.sub(\"\\${[A-Za-z]*}\", \"*\", policy)\n", "policy = json.loads(policy)\n", "policy = str(policy)\n", "print(type(policy), policy)\n" ], "output": {} }, { "cell_type": "code", "execution_count": null, "id": "2e2f8b0b-4cd4-4601-b5ea-735c5c9cf6c2", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": true, "action_uuid": "4a3d4143-35d2-4f52-aa36-791cc1d6d2d0", "checkEnabled": false, "continueOnError": false, "createTime": "2023-03-24T10:42:47Z", "credentialsJson": {}, "currentVersion": "v0.0.0", "customCell": true, "description": "Takes a generated policy and saves it as an IAM policy that can be applied to any IAM user.", "execution_data": { "last_date_success_run_cell": "2023-04-14T16:38:15.659Z" }, "id": 100356, "index": 100356, "inputData": [ { "PolicyName": { "constant": false, "value": "policy_name" }, "policyDocument": { "constant": false, "value": 
"policy" } } ], "inputschema": [ { "properties": { "PolicyName": { "default": "", "description": "Name of Policy to generate at AWS", "required": true, "title": "PolicyName", "type": "string" }, "policyDocument": { "default": "", "description": "Stringified JSON policy", "required": true, "title": "policyDocument", "type": "string" } }, "required": [ "PolicyName", "policyDocument" ], "title": "aws_create_IAMpolicy", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Create IAM Policy", "nouns": [], "orderProperties": [ "policyDocument", "PolicyName" ], "output": { "type": "" }, "outputParams": { "output_name": "createdPolicy", "output_name_enabled": true }, "printOutput": true, "tags": [], "title": "AWS Create IAM Policy", "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field, SecretStr\n", "from typing import Dict, List\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_create_IAMpolicy_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def aws_create_IAMpolicy(handle, policyDocument:str, PolicyName:str) -> Dict:\n", "\n", " client = handle.client('iam')\n", " response = client.create_policy(\n", " PolicyName=PolicyName,\n", " PolicyDocument=policyDocument,\n", " Description='generated Via unSkript',\n", "\n", " )\n", " return response\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(inputParamsJson='''{\n", " \"PolicyName\": \"policy_name\",\n", " \"policyDocument\": \"policy\"\n", " }''')\n", "task.configure(outputName=\"createdPolicy\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_create_IAMpolicy, lego_printer=aws_create_IAMpolicy_printer, hdl=hdl, args=args)" ], "output": {} }, { 
"cell_type": "code", "execution_count": null, "id": "81841c82-639c-4fb7-8a1e-aab82134f4e9", "metadata": { "collapsed": true, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-03-30T14:53:52.572Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "orderProperties": [], "tags": [], "credentialsJson": {} }, "outputs": [], "source": [ "print(createdPolicy['Policy']['Arn'])" ], "output": {} }, { "cell_type": "code", "execution_count": null, "id": "22deeb36-539b-493a-9d18-ce9c214cdacd", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "3f71dd060d5955f5dc9104dbaf418bf957b2222c510cb3afd09ded8e41e433d9", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Create New IAM User", "execution_data": { "last_date_success_run_cell": "2023-04-14T16:38:20.334Z" }, "id": 204, "index": 204, "inputData": [ { "tag_key": { "constant": false, "value": "\"test\"" }, "tag_value": { "constant": false, "value": "\"test\"" }, "user_name": { "constant": false, "value": "user_name" } } ], "inputschema": [ { "properties": { "tag_key": { "description": "Tag Key to new IAM User.", "title": "Tag Key", "type": "string" }, "tag_value": { "description": "Tag Value to new IAM User.", "title": "Tag Value", "type": "string" }, "user_name": { "description": "IAM User Name.", "title": "User Name", "type": "string" } }, "required": [ "user_name", "tag_key", "tag_value" ], "title": "aws_create_iam_user", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Create New IAM User", 
"nouns": [], "orderProperties": [ "user_name", "tag_key", "tag_value" ], "output": { "type": "" }, "printOutput": true, "tags": [ "aws_create_iam_user" ], "verbs": [] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import List, Dict\n", "from pydantic import BaseModel, Field\n", "from botocore.exceptions import ClientError\n", "import pprint\n", "from beartype import beartype\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_create_iam_user_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "@beartype\n", "def aws_create_iam_user(handle, user_name: str, tag_key: str, tag_value: str) -> Dict:\n", " \"\"\"aws_create_iam_user Creates new IAM User.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method\n", "\n", " :type user_name: string\n", " :param user_name: Name of new IAM User.\n", "\n", " :type tag_key: string\n", " :param tag_key: Tag Key assign to new User.\n", "\n", " :type tag_value: string\n", " :param tag_value: Tag Value assign to new User.\n", "\n", " :rtype: Dict with the stopped instances state info.\n", " \"\"\"\n", "\n", " ec2Client = handle.client(\"iam\")\n", " result = {}\n", " try:\n", " response = ec2Client.create_user(\n", " UserName=user_name,\n", " Tags=[\n", " {\n", " 'Key': tag_key,\n", " 'Value': tag_value\n", " }])\n", " result = response\n", " except ClientError as error:\n", " if error.response['Error']['Code'] == 'EntityAlreadyExists':\n", " result = error.response\n", " else:\n", " result = error.response\n", "\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(inputParamsJson='''{\n", " \"tag_key\": \"\\\\\"test\\\\\"\",\n", " \"tag_value\": \"\\\\\"test\\\\\"\",\n", " \"user_name\": \"user_name\"\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = 
task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_create_iam_user, lego_printer=aws_create_iam_user_printer, hdl=hdl, args=args)" ], "output": {} }, { "cell_type": "code", "execution_count": null, "id": "bebb49ec-d4f7-4ca5-910c-d0a836d882a2", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "dee9134df84f6c675edab485389572795169495347e40abbdf81f24ec807a85c", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "AWS Attach New Policy to User", "execution_data": { "last_date_success_run_cell": "2023-03-30T14:54:31.327Z" }, "id": 215, "index": 215, "inputData": [ { "policy_name": { "constant": false, "value": "createdPolicy['Policy']['Arn']" }, "user_name": { "constant": false, "value": "user_name" } } ], "inputschema": [ { "properties": { "policy_name": { "description": "Policy name to apply the permissions to the user.", "title": "Policy Name", "type": "string" }, "user_name": { "description": "IAM user whose policies need to fetched.", "title": "User Name", "type": "string" } }, "required": [ "user_name", "policy_name" ], "title": "aws_attache_iam_policy", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "AWS Attach New Policy to User", "nouns": [], "orderProperties": [ "user_name", "policy_name" ], "output": { "type": "" }, "printOutput": true, "tags": [ "aws_attache_iam_policy" ], "verbs": [] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import List, Dict\n", "from pydantic import 
BaseModel, Field\n", "from botocore.exceptions import ClientError\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_attach_iam_policy_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_attach_iam_policy(handle, user_name: str, policy_name: str) -> Dict:\n", " \"\"\"aws_attache_iam_policy used to provide user permissions.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type user_name: string\n", " :param user_name: Dictionary of credentials info.\n", "\n", " :type policy_name: string\n", " :param policy_name: Policy name to apply the permissions to the user.\n", "\n", " :rtype: Dict with User policy information.\n", " \"\"\"\n", " result = {}\n", " iamResource = handle.resource('iam')\n", " try:\n", " user = iamResource.User(user_name)\n", " response = user.attach_policy(\n", " PolicyArn='arn:aws:iam::aws:policy/'+policy_name\n", " )\n", " result = response\n", " except ClientError as error:\n", " result = error.response\n", "\n", " return result\n", "\n", "\n", "def unskript_default_printer(output):\n", " if isinstance(output, (list, tuple)):\n", " for item in output:\n", " print(f'item: {item}')\n", " elif isinstance(output, dict):\n", " for item in output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(output)\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(inputParamsJson='''{\n", " \"policy_name\": \"createdPolicy['Policy']['Arn']\",\n", " \"user_name\": \"user_name\"\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_attach_iam_policy, lego_printer=unskript_default_printer, hdl=hdl, args=args)" ], "output": {} } ], "metadata": { "execution_data": { "runbook_name": "Create an IAM user using Principle of Least Privilege", 
"parameters": [ "user_name", "CloudTrailArn", "policy_name", "region" ] }, "kernelspec": { "display_name": "unSkript (Build: 1039)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "CloudTrailArn": { "default": "arn:aws:cloudtrail:us-west-2:100498623390:trail/management-events", "description": "ARN of the CloudTrail to be queried", "title": "CloudTrailArn", "type": "string" }, "policy_name": { "default": "generated_iam_policy_11", "description": "IAM Policy to be created", "title": "policy_name", "type": "string" }, "reference_iam_arn": { "default": "arn:aws:iam::100498623390:user/doug-billing-s3", "description": "The arn of the Reference IAM. We will build a new policy based on the activity of this account.", "title": "reference_iam_arn", "type": "string" }, "region": { "default": "us-west-2", "description": "AWS Region", "title": "region", "type": "string" }, "threshold": { "default": 24, "description": "Number of hours to examine", "title": "threshold", "type": "number" }, "user_name": { "default": "Doug_generated_iam_14", "description": "IAM user to be created", "title": "user_name", "type": "string" } }, "required": [], "title": "Schema", "type": "object" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/IAM_security_least_privilege.json ================================================ { "name": "Create an IAM user using Principle of Least Privilege", "description": "Extract usage details from Cloudtrail of an existing user. 
Apply the usage to a new IAM Policy, and connect it to a new IAM profile.", "uuid": "65d8f7ea1d41ccf49b4a624b70cdbde0d16ad9ba348829e4ddbd59a83ce644bc", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SECOPS" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/Monitor_AWS_DynamoDB_provision_capacity.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "1fe9e993-6175-40e2-be4d-b38474f610c4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Monitor AWS DynamoDB provision capacity", "orderProperties": [], "tags": [], "title": "Monitor AWS DynamoDB provision capacity" }, "source": [ "
In this action, we search AWS for all EC2 instances matching a given tag and region, and return a list of their instance IDs.
\n", "\n", "\n", "Input parameters:
\n", "tag_key, tag_value, region
\n", "" ] }, { "cell_type": "code", "execution_count": 10, "id": "ef696074-ab97-4de7-b3ee-08faeacd22ff", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "a94de204575d5609dce3abee3f63e84913548ad792e51dd949333bf60ebd842a", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Filter AWS EC2 Instance", "execution_data": { "last_date_success_run_cell": "2023-02-16T11:17:19.558Z" }, "id": 260, "index": 260, "inputData": [ { "region": { "constant": false, "value": "region" }, "tag_key": { "constant": false, "value": "tag_key" }, "tag_value": { "constant": false, "value": "tag_value" } } ], "inputschema": [ { "properties": { "region": { "description": "AWS Region.", "title": "Region", "type": "string" }, "tag_key": { "description": "The key of the tag.", "title": "Tag Key", "type": "string" }, "tag_value": { "description": "The value of the key.", "title": "Tag Value", "type": "string" } }, "required": [ "tag_key", "tag_value", "region" ], "title": "aws_filter_ec2_by_tags", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Filter AWS EC2 Instance by tag", "nouns": [], "orderProperties": [ "tag_key", "tag_value", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "instance_list", "output_name_enabled": true }, "printOutput": true, "tags": [ "aws_filter_ec2_by_tags" ], "title": "Filter AWS EC2 Instance by tag", "trusted": true, "verbs": [] }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from 
pydantic import BaseModel, Field\n", "from typing import List\n", "from unskript.connectors.aws import aws_get_paginator\n", "import pprint\n", "from beartype import beartype\n", "\n", "\n", "@beartype\n", "def aws_filter_ec2_by_tags_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint({\"Instances\": output})\n", "\n", "\n", "@beartype\n", "def aws_filter_ec2_by_tags(handle, tag_key: str, tag_value: str, region: str) -> List:\n", " \"\"\"aws_filter_ec2_by_tags Returns an array of instances matching tags.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method.\n", "\n", " :type tag_key: string\n", " :param tag_key: Key for the EC2 instance tag.\n", "\n", " :type tag_value: string\n", " :param tag_value: value for the EC2 instance tag.\n", "\n", " :type region: string\n", " :param region: EC2 instance region.\n", "\n", " :rtype: Array of instances matching tags.\n", " \"\"\"\n", "\n", " ec2Client = handle.client('ec2', region_name=region)\n", " res = aws_get_paginator(ec2Client, \"describe_instances\", \"Reservations\",\n", " Filters=[{'Name': 'tag:' + tag_key, 'Values': [tag_value]}])\n", "\n", " result = []\n", " for reservation in res:\n", " for instance in reservation['Instances']:\n", " result.append(instance['InstanceId'])\n", " return result\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"tag_key\": \"tag_key\",\n", " \"tag_value\": \"tag_value\"\n", " }''')\n", "task.configure(outputName=\"instance_list\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_filter_ec2_by_tags, lego_printer=aws_filter_ec2_by_tags_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "7b35320c-c614-4d36-8fbb-cc102c02f72b", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" 
}, "source": [ "Output variable:
\n", "instance_list
Here we will use the unSkript Restart AWS EC2 Instances action. This action restarts the instances retrieved in Step 1: the list of instance IDs from Step 1 is passed as input to this step.
\n", "\n", "\n", "Input parameters:
\n", "instance_ids, region
\n", "" ] }, { "cell_type": "code", "execution_count": 22, "id": "bb7a4450-1efc-4aec-85c6-d9b4a8635762", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "e7d021a8e955291cf31e811e64a86baa2a902ea2185cb76e7121ebbab261c320", "checkEnabled": false, "collapsed": true, "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Restart AWS EC2 Instances", "execution_data": { "last_date_success_run_cell": "2023-02-16T11:22:08.644Z" }, "id": 257, "index": 257, "inputData": [ { "instance_ids": { "constant": false, "value": "instance_list" }, "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "instance_ids": { "description": "List of instance IDs. For eg. 
[\"i-foo\", \"i-bar\"]", "items": { "type": "string" }, "title": "Instance IDs", "type": "array" }, "region": { "description": "AWS Region of the instances.", "title": "Region", "type": "string" } }, "required": [ "instance_ids", "region" ], "title": "aws_restart_ec2_instances", "type": "object" } ], "isUnskript": false, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Restart AWS EC2 Instances", "nouns": [], "orderProperties": [ "instance_ids", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "restart_instance", "output_name_enabled": true }, "printOutput": true, "startcondition": "not dry_run_flag", "tags": [ "aws_restart_ec2_instances" ], "trusted": true, "verbs": [] }, "outputs": [], "source": [ "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from typing import List, Dict\n", "from pydantic import BaseModel, Field\n", "import pprint\n", "from beartype import beartype\n", "\n", "\n", "@beartype\n", "def aws_restart_ec2_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_restart_ec2_instances(handle, instance_ids: List, region: str) -> Dict:\n", " \"\"\"aws_restart_instances Restarts instances.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) 
method.\n", "\n", " :type instance_ids: list\n", " :param instance_ids: List of instance ids.\n", "\n", " :type region: string\n", " :param region: Region for instance.\n", "\n", " :rtype: Dict with the restarted instances info.\n", " \"\"\"\n", "\n", " ec2Client = handle.client('ec2', region_name=region)\n", " res = ec2Client.reboot_instances(InstanceIds=instance_ids)\n", " return res\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"instance_ids\": \"instance_list\",\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not dry_run_flag\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"restart_instance\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_restart_ec2_instances, lego_printer=aws_restart_ec2_instances_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "5336e57c-1c40-4f67-938c-dceee50b42be", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-3", "orderProperties": [], "tags": [], "title": "Step-3" }, "source": [ "Output variable:
\n", "restart_instance
Here we will use the unSkript Get AWS Instance Details action to fetch the details of each instance retrieved in Step 1.
\n", "\n", "\n", "Input parameters:
\n", "instance_id, region
\n", "" ] }, { "cell_type": "code", "execution_count": 23, "id": "b7630f20-a68e-45eb-bb5c-193231b5d262", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "930c0624b3d32846a0946e0a54dac8e69d7a1ee0e28e10de7338c68f06df8420", "checkEnabled": false, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Get AWS Instances Details", "execution_data": { "last_date_success_run_cell": "2023-02-16T11:23:01.767Z" }, "id": 210, "index": 210, "inputData": [ { "instance_id": { "constant": false, "value": "iter_item" }, "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "instance_id": { "description": "ID of the instance.", "title": "Instance Id", "type": "string" }, "region": { "description": "AWS Region of the instance.", "title": "Region", "type": "string" } }, "required": [ "instance_id", "region" ], "title": "aws_get_instance_details", "type": "object" } ], "isUnskript": false, "iterData": [ { "iter_enabled": true, "iter_item": "instance_id", "iter_list": { "constant": false, "objectItems": false, "value": "instance_list" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Get AWS Instances Details", "nouns": [], "orderProperties": [ "instance_id", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "instance_details", "output_name_enabled": true }, "printOutput": true, "tags": [ "aws_get_instance_details" ], "trusted": true, "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", 
"from typing import Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_instance_details(handle, instance_id: str, region: str) -> Dict:\n", " \"\"\"aws_get_instance_details Returns instance details.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method.\n", "\n", " :type instance_ids: list\n", " :param instance_ids: List of instance ids.\n", "\n", " :type region: string\n", " :param region: Region for instance.\n", "\n", " :rtype: Dict with the instance details.\n", " \"\"\"\n", "\n", " ec2client = handle.client('ec2', region_name=region)\n", " instances = []\n", " response = ec2client.describe_instances(\n", " Filters=[{\"Name\": \"instance-id\", \"Values\": [instance_id]}])\n", " for reservation in response[\"Reservations\"]:\n", " for instance in reservation[\"Instances\"]:\n", " instances.append(instance)\n", "\n", " return instances[0]\n", "\n", "\n", "def unskript_default_printer(output):\n", " if isinstance(output, (list, tuple)):\n", " for item in output:\n", " print(f'item: {item}')\n", " elif isinstance(output, dict):\n", " for item in output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(output)\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"instance_id\": \"iter_item\",\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"instance_list\",\n", " \"iter_parameter\": \"instance_id\"\n", " }''')\n", "task.configure(outputName=\"instance_details\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " 
task.execute(aws_get_instance_details, lego_printer=unskript_default_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "bee3abf5-3864-4a20-8154-2293a5c8aa28", "metadata": { "name": "Step-3 Extension", "orderProperties": [], "tags": [], "title": "Step-3 Extension" }, "source": [ "Output variable:
\n", "instance_details
In this action, we sort the output from step-3 and present the details of the instance in a well-formatted table.
" ] }, { "cell_type": "code", "execution_count": 24, "id": "a773f2a8-24b3-4dd6-a3c9-6266c9bafa05", "metadata": { "continueOnError": false, "credentialsJson": {}, "execution_data": { "last_date_success_run_cell": "2023-02-16T11:23:04.116Z" }, "inputData": [ {} ], "inputschema": [ { "properties": {}, "required": [], "title": "Instance Details" } ], "jupyter": { "source_hidden": true }, "name": "Instance Details", "orderProperties": [], "tags": [], "title": "Instance Details", "trusted": true }, "outputs": [], "source": [ "import pprint\n", "from pydantic import BaseModel, Field\n", "from typing import Dict\n", "from tabulate import tabulate\n", "\n", "output = instance_details\n", "instance_list = instance_list\n", "\n", "\n", "def Instance_Details(output, instance_list: list):\n", " data1 = []\n", " Header = \"\"\n", " for instance_id in instance_list:\n", " if instance_id in output.keys():\n", " output1 = output[instance_id]\n", " if isinstance(output1, (list, tuple)):\n", " for item in output1:\n", " print(f'item: {item}')\n", " elif isinstance(output1, dict):\n", " for key, value in output1.items():\n", " if isinstance(value, (list)):\n", " pass\n", " else:\n", " if key == \"InstanceId\":\n", " Header = value\n", " data1.append([key, value])\n", " print(f'\\n\\033[1m Table for Instance ID : {Header} \\033[0;0m')\n", " print(tabulate(data1))\n", " else:\n", " print(f'Output for {task.name}')\n", " print(output1)\n", "\n", "\n", "Instance_Details(output, instance_list)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "b3f10b1c-f542-48da-9b6e-1123873385a8", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "In this Runbook, we demonstrated the use of unSkript's AWS legos to restart the AWS EC2 instances and get the details. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Restart AWS EC2 Instances", "parameters": [ "tag_value", "region", "tag_key" ] }, "kernelspec": { "display_name": "unSkript (Build: 891)", "name": "python_kubernetes" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3" }, "parameterSchema": { "properties": { "dry_run_flag": { "default": false, "description": "If the dry run flag is set to false it will find the instances for the given tag and restart them and if set to true it will only display the instances for the given tag.", "title": "dry_run_flag", "type": "boolean" }, "region": { "default": "us-west-2", "description": "AWS Region", "title": "region", "type": "string" }, "tag_key": { "default": "Service", "description": "Tag Key", "title": "tag_key", "type": "string" }, "tag_value": { "default": "devmongodb", "description": "Tag Value", "title": "tag_value", "type": "string" } }, "required": [ "region", "tag_key", "tag_value" ], "title": "Schema", "type": "object" }, "parameterValues": { "dry_run_flag": false, "region": "us-west-2", "tag_key": "Name", "tag_value": "test-recreate-instance" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/Restart_AWS_EC2_Instances_By_Tag.json ================================================ { "name": "Restart AWS EC2 Instances", "description": "This runbook can be used to Restart AWS EC2 Instances", "uuid": "e6e51e94e093ff3730b95c689232afaa3fc4f337d6fdac0ebb644fb2d6380afd", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_TROUBLESHOOTING" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/Run_EC2_from_AMI.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "0ecd43d6-5d15-4210-95d5-6b7052748b74", "metadata": { 
"jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "Here we will use unSkript Launch AWS EC2 instance from an AMI action. This action is used to launch an EC2 instance from AMI.
\n", "\n", "\n", "Input parameters:
\n", "ami_id, region
\n", "" ] }, { "cell_type": "code", "execution_count": 9, "id": "cf78d7ed-4073-4231-bff5-54879ff27239", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "dc0cd6cd07b4a3c94ea019493659c3f455a7ae952ea7e5eefcb7c8d402271ef5", "checkEnabled": false, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Use this instance to Launch an AWS EC2 instance from an AMI", "execution_data": { "last_date_success_run_cell": "2023-02-16T11:44:06.426Z" }, "id": 294, "index": 294, "inputData": [ { "ami_id": { "constant": false, "value": "iter_item" }, "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "ami_id": { "description": "AMI Id.", "title": "AMI Id", "type": "string" }, "region": { "description": "AWS Region.", "title": "Region", "type": "string" } }, "required": [ "ami_id", "region" ], "title": "aws_launch_instance_from_ami", "type": "object" } ], "isUnskript": false, "iterData": [ { "iter_enabled": true, "iter_item": "ami_id", "iter_list": { "constant": false, "objectItems": false, "value": "ami_id" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Launch AWS EC2 Instance From an AMI", "nouns": [], "orderProperties": [ "ami_id", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "launch_instance", "output_name_enabled": true }, "printOutput": true, "tags": [ "aws_launch_instance_from_ami" ], "verbs": [] }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from typing import List\n", 
"import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_launch_instance_from_ami_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_launch_instance_from_ami(handle, ami_id: str, region: str) -> List:\n", " \"\"\"aws_launch_instance_from_ami Launch instances from a particular image.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type ami_id: string\n", " :param ami_id: AMI Id Information required to launch an instance.\n", "\n", " :type region: string\n", " :param region: Region to filter instances.\n", "\n", " :rtype: Dict with launched instances info.\n", " \"\"\"\n", " ec2Client = handle.client('ec2', region_name=region)\n", "\n", " res = ec2Client.run_instances(ImageId=ami_id, MinCount=1, MaxCount=1)\n", "\n", " return res['Instances']\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"ami_id\": \"iter_item\",\n", " \"region\": \"region\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"ami_id\",\n", " \"iter_parameter\": \"ami_id\"\n", " }''')\n", "task.configure(outputName=\"launch_instance\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(aws_launch_instance_from_ami, lego_printer=aws_launch_instance_from_ami_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "e6396a97-387f-4e18-9f41-efb3d0a6bf96", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "launch_instance
In this action, we sort the output from step-1 and get the instance ids.
" ] }, { "cell_type": "code", "execution_count": 15, "id": "5c2feffc-68fe-44bd-bb9b-b6e20244efe3", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-16T11:48:17.267Z" }, "jupyter": { "source_hidden": true }, "name": "Modify Output", "orderProperties": [], "tags": [], "title": "Modify Output", "credentialsJson": {} }, "outputs": [], "source": [ "instance_ids = []\n", "if launch_instance:\n", " for k, v in launch_instance.items():\n", " for i in v:\n", " instance_ids.append(i[\"InstanceId\"])" ] }, { "cell_type": "markdown", "id": "939c7878-2dc1-42f5-9945-7248fc6b85ba", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Here we will use unSkript Get AWS Instance Details action to get the details of the instances. This action is used to get details of instances that we received in step 1.
\n", "\n", "\n", "Input parameters:
\n", "instance_id, region
\n", "" ] }, { "cell_type": "code", "execution_count": 16, "id": "966fb848-2bfb-4530-91da-a085ca6c9cd0", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "930c0624b3d32846a0946e0a54dac8e69d7a1ee0e28e10de7338c68f06df8420", "checkEnabled": false, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Get AWS Instances Details", "execution_data": { "last_date_success_run_cell": "2023-02-16T11:51:06.278Z" }, "id": 210, "index": 210, "inputData": [ { "instance_id": { "constant": false, "value": "iter_item" }, "region": { "constant": false, "value": "region" } } ], "inputschema": [ { "properties": { "instance_id": { "description": "ID of the instance.", "title": "Instance Id", "type": "string" }, "region": { "description": "AWS Region of the instance.", "title": "Region", "type": "string" } }, "required": [ "instance_id", "region" ], "title": "aws_get_instance_details", "type": "object" } ], "isUnskript": false, "iterData": [ { "iter_enabled": true, "iter_item": "instance_id", "iter_list": { "constant": false, "objectItems": false, "value": "instance_ids" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_AWS", "name": "Get AWS Instances Details", "nouns": [], "orderProperties": [ "instance_id", "region" ], "output": { "type": "" }, "outputParams": { "output_name": "instance_details", "output_name_enabled": true }, "printOutput": true, "tags": [ "aws_get_instance_details" ], "title": "Get AWS Instances Details", "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import 
BaseModel, Field\n", "from typing import Dict\n", "import pprint\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def aws_get_instances_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def aws_get_instance_details(handle, instance_id: str, region: str) -> Dict:\n", " \"\"\"aws_get_instance_details Returns instance details.\n", "\n", " :type handle: object\n", " :param handle: Object returned by the task.validate(...) method.\n", "\n", " :type instance_ids: list\n", " :param instance_ids: List of instance ids.\n", "\n", " :type region: string\n", " :param region: Region for instance.\n", "\n", " :rtype: Dict with the instance details.\n", " \"\"\"\n", "\n", " ec2client = handle.client('ec2', region_name=region)\n", " instances = []\n", " response = ec2client.describe_instances(\n", " Filters=[{\"Name\": \"instance-id\", \"Values\": [instance_id]}])\n", " for reservation in response[\"Reservations\"]:\n", " for instance in reservation[\"Instances\"]:\n", " instances.append(instance)\n", "\n", " return instances[0]\n", "\n", "\n", "def unskript_default_printer(output):\n", " if isinstance(output, (list, tuple)):\n", " for item in output:\n", " print(f'item: {item}')\n", " elif isinstance(output, dict):\n", " for item in output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(output)\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"region\": \"region\",\n", " \"instance_id\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"instance_ids\",\n", " \"iter_parameter\": \"instance_id\"\n", " }''')\n", "task.configure(outputName=\"instance_details\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " 
task.execute(aws_get_instance_details, lego_printer=unskript_default_printer, hdl=hdl, args=args)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "466d72b2-48f8-45cc-b587-08b23129f43e", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2 Extension", "orderProperties": [], "tags": [], "title": "Step-2 Extension" }, "source": [ "Output variable:
\n", "instance_details
In this action, we sort the output from step-2 and present the details of the instance in a well-formatted table.
" ] }, { "cell_type": "code", "execution_count": 17, "id": "7ae145b4-6660-4441-942e-74e984318779", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-16T11:51:11.921Z" }, "jupyter": { "source_hidden": true }, "name": "Instance Details", "orderProperties": [], "tags": [], "title": "Instance Details", "credentialsJson": {} }, "outputs": [], "source": [ "import pprint\n", "from pydantic import BaseModel, Field\n", "from typing import Dict\n", "from tabulate import tabulate\n", "task.configure(printOutput=True)\n", "\n", "output = instance_details\n", "instance_list = instance_ids\n", "\n", "\n", "def Instance_Details(output, instance_list: list):\n", " data1 = []\n", " Header = \"\"\n", " for instance_id in instance_list:\n", " if instance_id in output.keys():\n", " output1 = output[instance_id]\n", " if isinstance(output1, (list, tuple)):\n", " for item in output1:\n", " print(f'item: {item}')\n", " elif isinstance(output1, dict):\n", " for key, value in output1.items():\n", " if isinstance(value, (list)):\n", " pass\n", " else:\n", " if key == \"InstanceId\":\n", " Header = value\n", " data1.append([key,value])\n", " print(f'\\n\\033[1m Table for Instance ID : {Header} \\033[0;0m')\n", " print(tabulate(data1))\n", " else:\n", " print(f'Output for {task.name}')\n", " print(output1)\n", "\n", "\n", "Instance_Details(output, instance_list)" ] }, { "attachments": {}, "cell_type": "markdown", "id": "1e18a18c-f822-452d-9a94-719f23734fa4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "### Conclusion\n", "In this Runbook, we demonstrated the use of unSkript's AWS legos to perform AWS actions and this runbook launched EC2 instances from AMI and show the details of the instance. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Launch AWS EC2 from AMI", "parameters": [ "ami_id", "region" ] }, "kernelspec": { "display_name": "unSkript (Build: 891)", "name": "python_kubernetes" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3" }, "parameterSchema": { "properties": { "ami_id": { "description": "List of AMI IDs to be the launch instance.", "title": "ami_id", "type": "array" }, "region": { "description": "AWS region", "title": "region", "type": "string" } }, "required": [ "ami_id", "region" ], "title": "Schema", "type": "object" }, "parameterValues": null }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: AWS/Run_EC2_from_AMI.json ================================================ { "name": "Launch AWS EC2 from AMI", "description": "This lego can be used to launch an AWS EC2 instance from AMI in the given region.", "uuid": "61fc20fd176f9b1d491d4d6cb58aab4d33759405874fbf8c83716c67bcdb52cc", "icon": "CONNECTOR_TYPE_AWS", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_AWS" ], "version": "1.0.0" } ================================================ FILE: AWS/Troubleshooting_Your_EC2_Configuration_in_Private_Subnet.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "2d5c877a-6cb6-46fa-b902-3a631c5798b4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Troubleshooting Your EC2 Configuration in a Private Subnet", "orderProperties": [], "tags": [], "title": "Troubleshooting Your EC2 Configuration in a Private Subnet" }, "source": [ "
](https://raw.githubusercontent.com/unskript/Awesome-CloudOps-Automation/master/.github/images/runbooksh_dark.png)
[
](https://raw.githubusercontent.com/unskript/Awesome-CloudOps-Automation/master/.github/images/runbooksh_light.png)
# AWS Start IAM Policy Generation
## Description
Given a region, a CloudTrail ARN (where the logs are being recorded), a reference IAM ARN (whose usage we will parse), and a service role, this will begin the generation of an IAM policy. The output is a string containing the generation Id.
## Action Details
```python
action.start_iam_policy_generation(handle, region:str, CloudTrailARN:str, IAMPrincipalARN:str, AccessRole:str, hours:float) -> str
```
- `handle`: Object of type unSkript AWS Connector.
- `region`: AWS region where CloudTrail logs are recorded.
- `CloudTrailARN`: ARN of the logs you wish to parse.
- `IAMPrincipalARN`: Reference ARN - we are copying the usage from this account.
- `AccessRole`: Role that allows access to logs.
- `hours`: Hours of data to parse.
## Action Output
This action will return a string value representing the generation Id.
## See it in Action
You can try out this action on the [runbooks.sh](http://runbooks.sh) open-source platform or on the [unSkript Cloud Free Trial](https://us.app.unskript.io).
Feel free to join the community Slack at [https://communityinviter.com/apps/cloud-ops-community/awesome-cloud-automation](https://communityinviter.com/apps/cloud-ops-community/awesome-cloud-automation) for support, questions, and comments
================================================
FILE: AWS/legos/AWS_Start_IAM_Policy_Generation/__init__.py
================================================
================================================
FILE: AWS/legos/__init__.py
================================================
#
# unSkript (c) 2022
#
================================================
FILE: AWS/legos/aws_add_lifecycle_configuration_to_s3_bucket/README.md
================================================
![runbooks.sh dark logo](https://raw.githubusercontent.com/unskript/Awesome-CloudOps-Automation/master/.github/images/runbooksh_dark.png)
![runbooks.sh light logo](https://raw.githubusercontent.com/unskript/Awesome-CloudOps-Automation/master/.github/images/runbooksh_light.png)
## See it in Action
You can try out this action by visiting the following links:
- [Runbooks.sh](http://runbooks.sh): Open source Runbooks and Cloud Automation.
- [unSkript Live](https://us.app.unskript.io): Cloud free trial.
- Community Slack: Join the [Cloud Ops Community](https://communityinviter.com/apps/cloud-ops-community/awesome-cloud-automation) for support, questions, and comments.
================================================
FILE: AWS/legos/aws_add_lifecycle_configuration_to_s3_bucket/__init__.py
================================================
================================================
FILE: AWS/legos/aws_add_lifecycle_configuration_to_s3_bucket/aws_add_lifecycle_configuration_to_s3_bucket.json
================================================
{
"action_title": "Add Lifecycle Configuration to AWS S3 Bucket",
"action_description": "Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_add_lifecycle_configuration_to_s3_bucket",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3" ]
}
================================================
FILE: AWS/legos/aws_add_lifecycle_configuration_to_s3_bucket/aws_add_lifecycle_configuration_to_s3_bucket.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Dict, Optional
import pprint
class InputSchema(BaseModel):
region: str = Field(
description='AWS Region.',
title='Region')
bucket_name: str = Field(
description='The name of the bucket for which to set the configuration.',
title='Bucket Name',
)
expiration_days: Optional[float] = Field(
30,
description='Specifies the expiration for the lifecycle of the object in the form of days. Eg: 30 (days)',
title='Expiration Days',
)
prefix: Optional[str] = Field(
'',
description='Prefix identifying one or more objects to which the rule applies.',
title='Prefix',
)
noncurrent_days: Optional[float] = Field(
30,
description='Specifies the number of days an object is noncurrent before Amazon S3 permanently deletes the noncurrent object versions',
title='Noncurrent Days',
)
def aws_add_lifecycle_configuration_to_s3_bucket_printer(output):
    """Pretty-print the action output; silently skips a missing result."""
    if output is not None:
        pprint.pprint(output)
def aws_add_lifecycle_configuration_to_s3_bucket(handle, region: str, bucket_name:str, expiration_days:int=30, prefix:str='', noncurrent_days:int=30) -> Dict:
    """aws_add_lifecycle_configuration_to_s3_bucket creates a new lifecycle
    configuration for the bucket or replaces an existing one, returning the
    service response.

    Fixes vs. previous revision:
      * ``bucket_name`` is no longer overwritten by a hard-coded test value,
        so the configuration is applied to the caller's bucket.
      * the ``prefix`` parameter is now actually applied to the rule filter
        instead of being ignored.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: location of the bucket

    :type bucket_name: string
    :param bucket_name: The name of the bucket for which to set the configuration.

    :type expiration_days: int
    :param expiration_days: Specifies the expiration for the lifecycle of the object in the form of days. Eg: 30 (days)

    :type prefix: string
    :param prefix: Prefix identifying one or more objects to which the rule applies ('' matches the whole bucket).

    :type noncurrent_days: int
    :param noncurrent_days: Specifies the number of days an object is noncurrent before Amazon S3 permanently deletes the noncurrent object versions.

    :rtype: Dict of the response of adding lifecycle configuration
    """
    s3Client = handle.client("s3", region_name=region)
    lifecycle_config = {
        'Rules': [
            {
                'Expiration': {
                    'Days': expiration_days,
                },
                # Apply the rule only to objects under the caller-supplied
                # prefix; an empty prefix covers every object in the bucket.
                'Filter': {
                    'Prefix': prefix
                },
                'Status': 'Enabled',
                'NoncurrentVersionExpiration': {
                    'NoncurrentDays': noncurrent_days
                }
            }
        ]
    }
    response = s3Client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name,
        LifecycleConfiguration=lifecycle_config
    )
    return response
================================================
FILE: AWS/legos/aws_apply_default_encryption_for_s3_buckets/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_apply_default_encryption_for_s3_buckets/__init__.py
================================================
================================================
FILE: AWS/legos/aws_apply_default_encryption_for_s3_buckets/aws_apply_default_encryption_for_s3_buckets.json
================================================
{
"action_title": "Apply AWS Default Encryption for S3 Bucket",
"action_description": "Apply AWS Default Encryption for S3 Bucket",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_apply_default_encryption_for_s3_buckets",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3" ]
}
================================================
FILE: AWS/legos/aws_apply_default_encryption_for_s3_buckets/aws_apply_default_encryption_for_s3_buckets.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pydantic schema describing the user-facing inputs of this action.
    # AWS region that hosts the target S3 bucket.
    region: str = Field(
        title='Region',
        description='AWS Region.')
    # Bucket that will receive the default-encryption configuration.
    bucket_name: str = Field(
        title='Bucket Name',
        description='AWS S3 Bucket Name.')
def aws_apply_default_encryption_for_s3_buckets_printer(output):
    """Pretty-print the encryption response when the action produced one."""
    if output is not None:
        pprint.pprint(output)
def aws_apply_default_encryption_for_s3_buckets(handle, bucket_name: str, region: str) -> Dict:
    """aws_apply_default_encryption_for_s3_buckets enables AES256 server-side
    default encryption on an S3 bucket.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type bucket_name: string
    :param bucket_name: Name of the S3 bucket.

    :type region: string
    :param region: location of the bucket

    :rtype: Dict with either a 'Response' entry (success) or an 'Error' entry.
    """
    client = handle.client('s3', region_name=region)
    # Default-encryption rule: encrypt new objects with S3-managed AES256 keys.
    encryption_rules = {
        "Rules": [
            {"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}
        ]
    }
    outcome = {}
    try:
        outcome['Response'] = client.put_bucket_encryption(
            Bucket=bucket_name,
            ServerSideEncryptionConfiguration=encryption_rules,
        )
    except Exception as e:
        # Preserve best-effort behaviour: report the error instead of raising.
        outcome['Error'] = e
    return outcome
================================================
FILE: AWS/legos/aws_attach_ebs_to_instances/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_attach_ebs_to_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_attach_ebs_to_instances/aws_attach_ebs_to_instances.json
================================================
{
"action_title": "Attach an EBS volume to an AWS EC2 Instance",
"action_description": "Attach an EBS volume to an AWS EC2 Instance",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_attach_ebs_to_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_attach_ebs_to_instances/aws_attach_ebs_to_instances.py
================================================
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
class InputSchema(BaseModel):
    # Pydantic schema describing the user-facing inputs of this action.
    # AWS region that hosts the EBS volume and the EC2 instance.
    region: str = Field(
        title='Region',
        description='AWS Region of the EBS volume')
    # EC2 instance the volume will be attached to.
    instance_id: str = Field(
        title='Instance Id',
        description='ID of the EC2 instance')
    # EBS volume to attach.
    volume_id: str = Field(
        title='Volume Id',
        description='ID of the EBS volume')
    # Device name exposed to the instance (e.g. /dev/sdf — confirm with caller).
    device_name: str = Field(
        title='Device Name',
        description='The device name')
def aws_attach_ebs_to_instances_printer(output):
    """Pretty-print the attach-volume response; no-op for a missing result."""
    if output is not None:
        pprint.pprint(output)
def aws_attach_ebs_to_instances(
        handle: Session,
        region: str,
        instance_id: str,
        volume_id: str,
        device_name: str
) -> Dict:
    """aws_attach_ebs_to_instances attaches an Elastic Block Store (EBS)
    volume to an EC2 instance and returns the attach response.

    :type region: string
    :param region: AWS Region of the EBS volume

    :type instance_id: string
    :param instance_id: ID of the instance

    :type volume_id: string
    :param volume_id: The ID of the volume

    :type device_name: string
    :param device_name: The device name

    :rtype: dict with registered instance details.
    """
    client = handle.client('ec2', region_name=region)
    attach_args = {
        'Device': device_name,
        'InstanceId': instance_id,
        'VolumeId': volume_id,
    }
    return client.attach_volume(**attach_args)
================================================
FILE: AWS/legos/aws_attach_iam_policy/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_attach_iam_policy/__init__.py
================================================
================================================
FILE: AWS/legos/aws_attach_iam_policy/aws_attach_iam_policy.json
================================================
{
"action_title": "AWS Attach New Policy to User",
"action_description": "AWS Attach New Policy to User",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_attach_iam_policy",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_IAM" ]
}
================================================
FILE: AWS/legos/aws_attach_iam_policy/aws_attach_iam_policy.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from botocore.exceptions import ClientError
class InputSchema(BaseModel):
    # Pydantic schema describing the user-facing inputs of this action.
    # IAM user the policy will be attached to.
    # NOTE(review): the description text below reads oddly for an attach
    # action ("policies need to fetched") — confirm intended wording; the
    # string is runtime data and is left unchanged here.
    user_name: str = Field(
        title='User Name',
        description='IAM user whose policies need to fetched.')
    # Name of the AWS-managed policy to attach (suffix of arn:aws:iam::aws:policy/).
    policy_name: str = Field(
        title='Policy Name',
        description='Policy name to apply the permissions to the user.')
def aws_attach_iam_policy_printer(output):
    """Pretty-print the attach-policy response; no-op for a missing result."""
    if output is not None:
        pprint.pprint(output)
def aws_attach_iam_policy(handle, user_name: str, policy_name: str) -> Dict:
    """aws_attach_iam_policy attaches an AWS-managed policy to an IAM user.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type user_name: string
    :param user_name: IAM user to attach the policy to.

    :type policy_name: string
    :param policy_name: Policy name to apply the permissions to the user.

    :rtype: Dict with User policy information (the error response on failure).
    """
    iam = handle.resource('iam')
    try:
        # AWS-managed policies all live under this fixed ARN prefix.
        return iam.User(user_name).attach_policy(
            PolicyArn='arn:aws:iam::aws:policy/'+policy_name
        )
    except ClientError as error:
        # Surface the AWS error payload instead of raising, matching the
        # best-effort contract of this action.
        return error.response
================================================
FILE: AWS/legos/aws_attach_tags_to_resources/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_attach_tags_to_resources/__init__.py
================================================
================================================
FILE: AWS/legos/aws_attach_tags_to_resources/aws_attach_tags_to_resources.json
================================================
{
"action_title": "AWS Attach Tags to Resources",
"action_description": "AWS Attach Tags to Resources",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_attach_tags_to_resources",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_DEVOPS","CATEGORY_TYPE_AWS" ]
}
================================================
FILE: AWS/legos/aws_attach_tags_to_resources/aws_attach_tags_to_resources.py
================================================
from __future__ import annotations
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pydantic schema describing the user-facing inputs of this action.
    region: str = Field(..., description='AWS Region.', title='Region')
    # ARNs to tag; the tagging API caps each call at 20 ARNs, so the
    # entry function chunks this list with break_list.
    resource_arn: List = Field(..., description='Resource ARNs.', title='Resource ARN')
    tag_key: str = Field(..., description='Resource Tag Key.', title='Tag Key')
    tag_value: str = Field(..., description='Resource Tag Value.', title='Tag Value')
# This API has a limit of 20 ARNs per api call...
#we'll need to break up the list into chunks of 20
def break_list(long_list, max_size):
    """Split long_list into consecutive chunks holding at most max_size items."""
    chunks = []
    for start in range(0, len(long_list), max_size):
        chunks.append(long_list[start:start + max_size])
    return chunks
def aws_attach_tags_to_resources_printer(output):
    """Pretty-print the tagging result; a None output is silently skipped."""
    if output is not None:
        pprint.pprint(output)
def aws_attach_tags_to_resources(
        handle,
        resource_arn: list,
        tag_key: str,
        tag_value: str,
        region: str
    ) -> Dict:
    """aws_attach_tags_to_resources attaches the given tag to each resource ARN.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type resource_arn: list
    :param resource_arn: Resource ARNs.
    :type tag_key: str
    :param tag_key: Resource Tag Key.
    :type tag_value: str
    :param tag_value: Resource Tag value.
    :type region: str
    :param region: Region to filter resources.
    :rtype: Dict mapping chunk index to the API response, or
            "<index> error" to the error text for failed chunks.
    """
    # The Resource Groups Tagging API is the service that supports bulk
    # tagging (the previous name 'ec2Client' was misleading).
    taggingClient = handle.client('resourcegroupstaggingapi', region_name=region)
    result = {}
    # tag_resources accepts at most 20 ARNs per call, so the ARN list is
    # broken into chunks of 20.
    for index, chunk in enumerate(break_list(resource_arn, 20)):
        try:
            response = taggingClient.tag_resources(
                ResourceARNList=chunk,
                Tags={tag_key: tag_value}
            )
            result[index] = response
        except Exception as error:
            # Record the failure text for this chunk (a raw exception object
            # is not JSON-serializable) and keep tagging the remaining chunks.
            result[f"{index} error"] = str(error)
    return result
================================================
FILE: AWS/legos/aws_change_acl_permissions_of_buckets/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_change_acl_permissions_of_buckets/__init__.py
================================================
================================================
FILE: AWS/legos/aws_change_acl_permissions_of_buckets/aws_change_acl_permissions_of_buckets.json
================================================
{
"action_title": "AWS Change ACL Permission of public S3 Bucket",
"action_description": "AWS Change ACL Permission public S3 Bucket",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_change_acl_permissions_of_buckets",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_is_remediation": true,
"action_categories": [ "CATEGORY_TYPE_SECOPS","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3" ]
}
================================================
FILE: AWS/legos/aws_change_acl_permissions_of_buckets/aws_change_acl_permissions_of_buckets.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, Dict
from pydantic import BaseModel, Field
from unskript.enums.aws_canned_acl_enums import CannedACLPermissions
class InputSchema(BaseModel):
    # AWS region hosting the bucket.
    region: str = Field(
        title='Region',
        description='AWS Region.')
    # Name of the S3 bucket whose ACL will be replaced.
    bucket_name: str = Field(
        title='Bucket Name',
        description='AWS S3 Bucket Name.')
    # Canned ACL to apply; the entry function falls back to 'private'
    # when this is left unset.
    acl: Optional[CannedACLPermissions] = Field(
        title='Canned ACL Permission',
        description=("Canned ACL Permission type - 'private'|'public-read'|'public-read-write"
                     "'|'authenticated-read'."))
def aws_change_acl_permissions_of_buckets_printer(output):
    """Pretty-print the put-bucket-acl response; a None output is silently skipped."""
    if output is not None:
        pprint.pprint(output)
def aws_change_acl_permissions_of_buckets(
        handle,
        bucket_name: str,
        acl: CannedACLPermissions = CannedACLPermissions.Private,
        region: Optional[str] = None
    ) -> Dict:
    """aws_change_acl_permissions_of_buckets applies a canned ACL to an S3 bucket.

    :type handle: Session
    :param handle: Object returned by the task.validate(...) method

    :type bucket_name: string
    :param bucket_name: S3 bucket name where to set ACL on.

    :type acl: CannedACLPermissions
    :param acl: Canned ACL Permission type - 'private'|'public-read'|'public-read-write
          '|'authenticated-read'. Defaults to 'private' when unset.

    :type region: string
    :param region: location of the bucket.

    :rtype: Dict of buckets ACL change info
    """
    # Fall back to the most restrictive canned ACL when none was supplied.
    # A falsy check covers both None and empty string, and avoids the
    # TypeError that len() raised for non-string enum members.
    all_permissions = acl
    if not acl:
        all_permissions = "private"
    # connect to the S3 using client
    s3Client = handle.client('s3',
                             region_name=region)
    # Put bucket ACL for the permissions grant
    response = s3Client.put_bucket_acl(
        Bucket=bucket_name,
        ACL=all_permissions)
    return response
================================================
FILE: AWS/legos/aws_check_rds_non_m5_t3_instances/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_check_rds_non_m5_t3_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_check_rds_non_m5_t3_instances/aws_check_rds_non_m5_t3_instances.json
================================================
{
"action_title": "AWS Check if RDS instances are not M5 or T3",
"action_description": "AWS Check if RDS instances are not M5 or T3",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_check_rds_non_m5_t3_instances",
"action_needs_credential": true,
"action_is_check": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": ["CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS"],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_check_rds_non_m5_t3_instances/aws_check_rds_non_m5_t3_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    # Optional region; when left empty, every AWS region is scanned.
    region: Optional[str] = Field(
        '',
        title='AWS Region',
        description='AWS Region.'
    )
def aws_check_rds_non_m5_t3_instances_printer(output):
    """Pretty-print the check result tuple; a None output is silently skipped."""
    if output is not None:
        pprint.pprint(output)
def aws_check_rds_non_m5_t3_instances(handle, region: str = "") -> Tuple:
    """aws_check_rds_non_m5_t3_instances Gets all DB instances that are not m5 or t3.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region. When empty, all regions are scanned.

    :rtype: Tuple of (True, None) when every instance is m5/t3, otherwise
            (False, list of {"region", "instance"} dicts).
    """
    result = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            rdsClient = handle.client('rds', region_name=reg)
            response = aws_get_paginator(rdsClient, "describe_db_instances", "DBInstances")
            for db in response:
                # Instance classes look like 'db.<family>.<size>', e.g.
                # 'db.m5.large'. Split on '.' instead of slicing fixed
                # positions so extended families such as 'm5d' are not
                # mistaken for plain 'm5'.
                parts = db['DBInstanceClass'].split('.')
                family = parts[1] if len(parts) > 1 else ''
                if family not in ('m5', 't3'):
                    result.append({
                        "region": reg,
                        "instance": db['DBInstanceIdentifier'],
                    })
        except Exception:
            # Best effort: regions that cannot be queried are skipped.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_check_ssl_certificate_expiry/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_check_ssl_certificate_expiry/__init__.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
================================================
FILE: AWS/legos/aws_check_ssl_certificate_expiry/aws_check_ssl_certificate_expiry.json
================================================
{
"action_title": "Check SSL Certificate Expiry",
"action_description": "Check ACM SSL Certificate expiry date",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_check_ssl_certificate_expiry",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_SECOPS","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ACM" ]
}
================================================
FILE: AWS/legos/aws_check_ssl_certificate_expiry/aws_check_ssl_certificate_expiry.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
import datetime
import dateutil
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Certificates expiring within this many days are reported.
    threshold_days: int = Field(
        title="Threshold Days",
        description=("Threshold number of days to check for expiry. "
                     "Eg: 30 -lists all certificates which are expiring within 30 days")
    )
    # AWS region whose ACM certificates are inspected.
    region: str = Field(
        title='Region',
        description='Name of the AWS Region'
    )
def aws_check_ssl_certificate_expiry_printer(output):
    """Pretty-print the domain -> days-to-expiry map; None output is skipped."""
    if output is not None:
        pprint.pprint(output)
def aws_check_ssl_certificate_expiry(handle, threshold_days: int, region: str,) -> Dict:
    """aws_check_ssl_certificate_expiry returns all the ACM issued certificates
       which expire within the threshold (negative values mean already expired).

    :type handle: object
    :param handle: Object returned from Task Validate

    :type threshold_days: int
    :param threshold_days: Threshold number of days to check for expiry.
          Eg: 30 -lists all certificates which are expiring within 30 days

    :type region: str
    :param region: Region name of the AWS account

    :rtype: Dict mapping domain name -> days until expiry
    """
    acmClient = handle.client('acm', region_name=region)
    expiring_domain_list = {}
    # Aware "now"; the tz choice does not affect the subtraction below,
    # so the stdlib timezone replaces the dateutil dependency.
    right_now = datetime.datetime.now(datetime.timezone.utc)
    certificates_list = acmClient.list_certificates(CertificateStatuses=['ISSUED'])
    for summary in certificates_list['CertificateSummaryList']:
        details = acmClient.describe_certificate(CertificateArn=summary['CertificateArn'])
        expiry_date = details['Certificate'].get('NotAfter')
        if expiry_date is None:
            # No expiry recorded for this certificate: skip it rather than
            # reuse a stale value from a previous iteration (the old
            # parallel-list version misaligned domains in this case).
            continue
        days_remaining = (expiry_date - right_now).days
        # Report per-domain directly; the previous version mapped a cert at
        # exactly threshold_days to a bogus value of 0 and reported it.
        if days_remaining < threshold_days:
            expiring_domain_list[summary['DomainName']] = days_remaining
    return expiring_domain_list
================================================
FILE: AWS/legos/aws_cloudwatch_attach_webhook_notification_to_alarm/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_cloudwatch_attach_webhook_notification_to_alarm/__init__.py
================================================
================================================
FILE: AWS/legos/aws_cloudwatch_attach_webhook_notification_to_alarm/aws_cloudwatch_attach_webhook_notification_to_alarm.json
================================================
{
"action_title": "Attach a webhook endpoint to AWS Cloudwatch alarm",
"action_description": "Attach a webhook endpoint to one of the SNS attached to the AWS Cloudwatch alarm.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_cloudwatch_attach_webhook_notification_to_alarm",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_cloudwatch_attach_webhook_notification_to_alarm/aws_cloudwatch_attach_webhook_notification_to_alarm.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from urllib.parse import urlparse
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
class InputSchema(BaseModel):
    # Name of the CloudWatch alarm whose SNS topic gets the webhook.
    alarm_name: str = Field(
        title="Alarm name",
        description="Cloudwatch alarm name.",
    )
    # AWS region hosting the alarm (and its SNS topics).
    region: str = Field(
        title="Region",
        description="AWS Region of the cloudwatch.")
    # Webhook endpoint; must use the http or https scheme.
    url: str = Field(
        title="URL",
        description=("URL where the alarm notification needs to be sent. "
                     "URL should start with http or https.")
    )
def aws_cloudwatch_attach_webhook_notification_to_alarm_printer(output):
    """Pretty-print the subscription ARN; a None output is silently skipped."""
    if output is not None:
        pprint.pprint({"Subscription ARN" : output})
def aws_cloudwatch_attach_webhook_notification_to_alarm(
        hdl: Session,
        alarm_name: str,
        region: str,
        url: str
    ) -> str:
    """aws_cloudwatch_attach_webhook_notification_to_alarm returns subscriptionArn

    :type hdl: Session
    :param hdl: Object returned by the task.validate(...) method.
    :type alarm_name: string
    :param alarm_name: Cloudwatch alarm name.
    :type region: string
    :param region: AWS Region of the cloudwatch.
    :type url: string
    :param url: URL where the alarm notification needs to be sent. Must use
        the http or https scheme.
    :rtype: Returns subscriptionArn (or an explanatory error string).
    """
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    # Get the configured SNS(es) to this alarm.
    alarmDetail = cloudwatchClient.describe_alarms(
        AlarmNames=[alarm_name]
    )
    # NOTE(review): describe_alarms reports an unknown alarm with empty
    # CompositeAlarms/MetricAlarms lists, so guard both before indexing;
    # the previous MetricAlarms[0] access raised IndexError in that case.
    if alarmDetail is None or (
            len(alarmDetail.get('CompositeAlarms', [])) == 0
            and len(alarmDetail.get('MetricAlarms', [])) == 0):
        return f'Alarm {alarm_name} not found in AWS region {region}'
    # Need to get the AlarmActions from either composite or metric field.
    if len(alarmDetail['CompositeAlarms']) > 0:
        snses = alarmDetail['CompositeAlarms'][0]['AlarmActions']
    else:
        snses = alarmDetail['MetricAlarms'][0]['AlarmActions']
    # Pick any sns to configure the url endpoint.
    if len(snses) == 0:
        return f'No SNS configured for alarm {alarm_name}'
    snsArn = snses[0]
    print(f'Configuring url endpoint on SNS {snsArn}')
    snsClient = hdl.client('sns', region_name=region)
    # Figure out the protocol from the url
    try:
        parsedURL = urlparse(url)
    except Exception as e:
        print(f'Invalid URL {url}, {e}')
        raise e
    if parsedURL.scheme not in ('http', 'https'):
        return f'Invalid URL {url}'
    protocol = parsedURL.scheme
    try:
        response = snsClient.subscribe(
            TopicArn=snsArn,
            Protocol=protocol,
            Endpoint=url,
            ReturnSubscriptionArn=True)
    except Exception as e:
        print(f'Subscribe to SNS topic arn {snsArn} failed, {e}')
        raise e
    subscriptionArn = response['SubscriptionArn']
    print(f'URL {url} subscribed to SNS {snsArn}, subscription ARN {subscriptionArn}')
    return subscriptionArn
================================================
FILE: AWS/legos/aws_create_IAMpolicy/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_create_IAMpolicy/__init__.py
================================================
================================================
FILE: AWS/legos/aws_create_IAMpolicy/aws_create_IAMpolicy.json
================================================
{
"action_title": "AWS Create IAM Policy",
"action_description": "Given an AWS policy (as a string), and the name for the policy, this will create an IAM policy.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_create_IAMpolicy",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_IAM" ,"CATEGORY_TYPE_IAM" ]
}
================================================
FILE: AWS/legos/aws_create_IAMpolicy/aws_create_IAMpolicy.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No user-facing inputs are declared here; the entry function's
    # parameters (policyDocument, PolicyName) are supplied by the caller.
    pass
def aws_create_IAMpolicy_printer(output):
    """Pretty-print the create_policy response; a None output is silently skipped."""
    if output is not None:
        pprint.pprint(output)
def aws_create_IAMpolicy(handle, policyDocument:str, PolicyName:str) -> Dict:
    """aws_create_IAMpolicy creates a new customer-managed IAM policy.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type policyDocument: str
    :param policyDocument: JSON string of the IAM policy document.
    :type PolicyName: str
    :param PolicyName: Name for the new IAM policy.
    :rtype: Dict with the create_policy API response.
    """
    client = handle.client('iam')
    response = client.create_policy(
        PolicyName=PolicyName,
        PolicyDocument=policyDocument,
        Description='generated Via unSkript',
    )
    return response
================================================
FILE: AWS/legos/aws_create_access_key/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_create_access_key/__init__.py
================================================
================================================
FILE: AWS/legos/aws_create_access_key/aws_create_access_key.json
================================================
{
"action_title": "AWS Create Access Key",
"action_description": "Create a new Access Key for the User",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_create_access_key",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_is_remediation": true,
"action_categories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_IAM" ,"CATEGORY_TYPE_IAM" ]
}
================================================
FILE: AWS/legos/aws_create_access_key/aws_create_access_key.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # IAM username for which a new access key will be created.
    aws_username: str = Field(
        title="Username",
        description="Username of the IAM User"
    )
def aws_create_access_key_printer(output):
    """Pretty-print the new access key details; a None output is silently skipped."""
    if output is not None:
        pprint.pprint(output)
def aws_create_access_key(
        handle,
        aws_username: str
    ) -> Dict:
    """aws_create_access_key creates a new access key for the given user.

    :type handle: object
    :param handle: Object returned from Task Validate
    :type aws_username: str
    :param aws_username: Username of the IAM user to be looked up
    :rtype: Result Dictionary of result
    """
    iamClient = handle.client('iam')
    result = iamClient.create_access_key(UserName=aws_username)
    # Dict keys are unique by construction, so a shallow copy replaces the
    # previous key-dedup loop (which could never skip anything).
    return dict(result)
================================================
FILE: AWS/legos/aws_create_bucket/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_create_bucket/__init__.py
================================================
================================================
FILE: AWS/legos/aws_create_bucket/aws_create_bucket.json
================================================
{
"action_title": "Create AWS Bucket",
"action_description": "Create a new AWS S3 Bucket",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_create_bucket",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3" ]
}
================================================
FILE: AWS/legos/aws_create_bucket/aws_create_bucket.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, Dict
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
class InputSchema(BaseModel):
    # Name of the S3 bucket to create.
    name: str = Field(
        title='Bucket Name',
        description='Name of the bucket to be created.')
    # Canned ACL applied to the new bucket.
    acl: str = Field(
        title='ACL',
        description=('The Canned ACL to apply to the bucket. Possible values: '
                     'private, public-read, public-read-write, authenticated-read.'))
    # Optional region; when unset the client's default location is used.
    region: Optional[str] = Field(
        title='Region',
        description='AWS Region of the bucket.')
def aws_create_bucket_printer(output):
    """Pretty-print the create_bucket response; a None output is silently skipped."""
    if output is not None:
        pprint.pprint(output)
def aws_create_bucket(handle: Session, name: str, acl: str, region: str = None) -> Dict:
    """aws_create_bucket Creates a new bucket.

    :type handle: Session
    :param handle: Object returned by the task.validate(...) method.
    :type name: str
    :param name: Name of the bucket to be created.
    :type acl: str
    :param acl: Canned ACL to apply (private, public-read,
        public-read-write, authenticated-read).
    :type region: str
    :param region: AWS Region of the bucket; when None the client's
        default location applies.
    :rtype: Dict with the new bucket info.
    """
    if region is None:
        # No region given: create in the client's default location.
        s3Client = handle.client('s3')
        return s3Client.create_bucket(ACL=acl, Bucket=name)
    s3Client = handle.client('s3', region_name=region)
    # NOTE(review): S3 rejects an explicit LocationConstraint of
    # 'us-east-1'; callers targeting that region should pass region=None
    # -- confirm against the S3 CreateBucket API.
    return s3Client.create_bucket(
        ACL=acl,
        Bucket=name,
        CreateBucketConfiguration={'LocationConstraint': region})
================================================
FILE: AWS/legos/aws_create_iam_user/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_create_iam_user/__init__.py
================================================
================================================
FILE: AWS/legos/aws_create_iam_user/aws_create_iam_user.json
================================================
{
"action_title": "Create New IAM User",
"action_description": "Create New IAM User",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_create_iam_user",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_IAM" ,"CATEGORY_TYPE_IAM" ]
}
================================================
FILE: AWS/legos/aws_create_iam_user/aws_create_iam_user.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from botocore.exceptions import ClientError
from beartype import beartype
class InputSchema(BaseModel):
    # Name for the new IAM user.
    user_name: str = Field(
        title='User Name',
        description='IAM User Name.')
    # Tag key attached to the new user at creation time.
    tag_key: str = Field(
        title='Tag Key',
        description='Tag Key to new IAM User.')
    # Tag value attached to the new user at creation time.
    tag_value: str = Field(
        title='Tag Value',
        description='Tag Value to new IAM User.')
@beartype
def aws_create_iam_user_printer(output):
    """Pretty-print the create_user response; a None output is silently skipped."""
    if output is not None:
        pprint.pprint(output)
@beartype
def aws_create_iam_user(handle, user_name: str, tag_key: str, tag_value: str) -> Dict:
    """aws_create_iam_user Creates new IAM User.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method
    :type user_name: string
    :param user_name: Name of new IAM User.
    :type tag_key: string
    :param tag_key: Tag Key assign to new User.
    :type tag_value: string
    :param tag_value: Tag Value assign to new User.
    :rtype: Dict with the create_user response, or the error response
            (e.g. EntityAlreadyExists) when the call fails.
    """
    iamClient = handle.client("iam")
    result = {}
    try:
        response = iamClient.create_user(
            UserName=user_name,
            Tags=[
                {
                    'Key': tag_key,
                    'Value': tag_value
                }])
        result = response
    except ClientError as error:
        # Every client error -- including EntityAlreadyExists -- surfaces
        # the raw error response; the previous branch-per-code did the
        # same thing in both branches.
        result = error.response
    return result
================================================
FILE: AWS/legos/aws_create_redshift_query/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_create_redshift_query/__init__.py
================================================
================================================
FILE: AWS/legos/aws_create_redshift_query/aws_create_redshift_query.json
================================================
{
"action_title": "AWS Redshift Query",
"action_description": "Make a SQL Query to the given AWS Redshift database",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_create_redshift_query",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_REDSHIFT" ]
}
================================================
FILE: AWS/legos/aws_create_redshift_query/aws_create_redshift_query.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from __future__ import annotations
from pydantic import BaseModel, Field
from beartype import beartype
class InputSchema(BaseModel):
    # AWS region hosting the Redshift cluster.
    region: str = Field(..., description='AWS Region.', title='Region')
    # SQL statement to execute against the Redshift database.
    query: str = Field(
        description='sql query to run',
        title='query',
    )
    cluster: str = Field(
        description='Name of Redshift Cluster', title='cluster'
    )
    database: str = Field(
        description='Name of your Redshift database', title='database'
    )
    # Secrets Manager ARN holding the database credentials.
    secretArn: str = Field(
        description='Value of your Secrets Manager ARN', title='secretArn'
    )
@beartype
def aws_create_redshift_query(
        handle,
        region: str,
        cluster: str,
        database: str,
        secretArn: str,
        query: str
    ) -> str:
    """aws_create_redshift_query submits a SQL statement to a Redshift cluster.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: str
    :param region: AWS Region of the Redshift cluster.
    :type cluster: str
    :param cluster: Name of the Redshift cluster.
    :type database: str
    :param database: Name of the Redshift database.
    :type secretArn: str
    :param secretArn: Secrets Manager ARN holding the DB credentials.
    :type query: str
    :param query: SQL query to run.
    :rtype: str Id of the submitted statement, usable to fetch results later.
    """
    client = handle.client('redshift-data', region_name=region)
    # execute_statement is asynchronous; it returns immediately with an Id
    # that identifies the running statement.
    response = client.execute_statement(
        ClusterIdentifier=cluster,
        Database=database,
        SecretArn=secretArn,
        Sql=query
    )
    return response['Id']
================================================
FILE: AWS/legos/aws_create_user_login_profile/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_create_user_login_profile/__init__.py
================================================
================================================
FILE: AWS/legos/aws_create_user_login_profile/aws_create_user_login_profile.json
================================================
{
"action_title": "Create Login profile for IAM User",
"action_description": "Create Login profile for IAM User",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_create_user_login_profile",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_IAM" ,"CATEGORY_TYPE_IAM" ]
}
================================================
FILE: AWS/legos/aws_create_user_login_profile/aws_create_user_login_profile.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from botocore.exceptions import ClientError
class InputSchema(BaseModel):
    # IAM user that receives the console login profile.
    user_name: str = Field(
        title='User Name',
        description='IAM User Name.')
    # Temporary password; the entry function forces a reset on first login.
    password: str = Field(
        title='Password',
        description='Password for IAM User.')
def aws_create_user_login_profile_printer(output):
    """Pretty-print the login-profile response; a None output is silently skipped."""
    if output is not None:
        pprint.pprint(output)
def aws_create_user_login_profile(
        handle,
        user_name: str,
        password: str
    ) -> Dict:
    """aws_create_user_login_profile Create login profile for IAM User.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.
    :type user_name: string
    :param user_name: Name of new IAM User.
    :type password: string
    :param password: temporary password for new User.
    :rtype: Dict with the Profile Creation status info.
    """
    iamClient = handle.client("iam")
    result = {}
    try:
        # Force a password reset on first sign-in.
        response = iamClient.create_login_profile(
            UserName=user_name,
            Password=password,
            PasswordResetRequired=True)
        result = response
    except ClientError as error:
        # All client errors (including EntityAlreadyExists) surface the raw
        # error response; the previous branch-per-code was identical in
        # both branches.
        result = error.response
    return result
================================================
FILE: AWS/legos/aws_create_volumes_snapshot/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_create_volumes_snapshot/__init__.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
================================================
FILE: AWS/legos/aws_create_volumes_snapshot/aws_create_volumes_snapshot.json
================================================
{
"action_title": "AWS Create Snapshot For Volume",
"action_description": "Create a snapshot for EBS volume of the EC2 Instance for backing up the data stored in EBS",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_create_volumes_snapshot",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_create_volumes_snapshot/aws_create_volumes_snapshot.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # EBS volume to snapshot, e.g. vol-01eb21cfce30a956c.
    volume_id: str = Field(
        title='Volume ID',
        description='Volume ID to create snapshot for particular volume e.g. vol-01eb21cfce30a956c')
    # AWS region hosting the volume.
    region: str = Field(
        title='Region',
        description='AWS Region.')
def aws_create_volumes_snapshot_printer(output):
    """Pretty-print the snapshot list; a None output is silently skipped."""
    if output is not None:
        pprint.pprint(output)
def aws_create_volumes_snapshot(handle, volume_id: str, region: str) -> List:
    """aws_create_volumes_snapshot Returns a list containing the new snapshot.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type volume_id: string
    :param volume_id: Volume ID to create snapshot for particular volume.
    :type region: string
    :param region: used to filter the volume for a given region.
    :rtype: List containing the created snapshot object.
    """
    ec2Resource = handle.resource('ec2', region_name=region)
    # The previous try/except merely re-raised the same exception, so the
    # call is made directly and any failure propagates unchanged.
    snapshot = ec2Resource.create_snapshot(VolumeId=volume_id)
    return [snapshot]
================================================
FILE: AWS/legos/aws_delete_access_key/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_access_key/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_access_key/aws_delete_access_key.json
================================================
{
"action_title": "AWS Delete Access Key",
"action_description": "Delete an Access Key for a User",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_access_key",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_is_remediation": true,
"action_categories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_IAM" ,"CATEGORY_TYPE_IAM" ]
}
================================================
FILE: AWS/legos/aws_delete_access_key/aws_delete_access_key.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "AWS Delete Access Key"
    # Action; Field titles/descriptions are surfaced as form labels.
    aws_username: str = Field(
        title="Username",
        description="Username of the IAM User"
    )
    # The access key to remove from the user (e.g. during key rotation).
    aws_access_key_id: str = Field(
        title="Access Key ID",
        description="Old Access Key ID of the User"
    )
def aws_delete_access_key_printer(output):
    """Print a success message and the Action output; None prints nothing."""
    if output is not None:
        pprint.pprint("Access Key successfully deleted")
        pprint.pprint(output)
def aws_delete_access_key(
        handle,
        aws_username: str,
        aws_access_key_id: str,
) -> Dict:
    """aws_delete_access_key deletes the given access key.

    :type handle: object
    :param handle: Object returned from Task Validate
    :type aws_username: str
    :param aws_username: Username of the IAM user to be looked up
    :type aws_access_key_id: str
    :param aws_access_key_id: Old Access Key ID of the user which needs to be deleted
    :rtype: Dict with the delete_access_key API response.
    """
    iam_client = handle.client('iam')
    result = iam_client.delete_access_key(UserName=aws_username, AccessKeyId=aws_access_key_id)
    # A plain shallow copy is sufficient: dict keys are unique, so the old
    # "temp_list" de-duplication loop could never skip anything.
    return dict(result)
================================================
FILE: AWS/legos/aws_delete_bucket/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_bucket/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_bucket/aws_delete_bucket.json
================================================
{
"action_title": "Delete AWS Bucket",
"action_description": "Delete an AWS S3 Bucket",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_bucket",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3" ]
}
================================================
FILE: AWS/legos/aws_delete_bucket/aws_delete_bucket.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, Dict
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "Delete AWS Bucket" Action.
    name: str = Field(
        title='Bucket Name',
        description='Name of the bucket to be deleted.')
    # Optional: when omitted, the S3 client is created without an explicit region.
    region: Optional[str] = Field(
        title='Region',
        description='AWS Region of the bucket.')
def aws_delete_bucket_printer(output):
    """Pretty-print the Action output; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_delete_bucket(handle: Session, name: str, region: str = None) -> Dict:
    """aws_delete_bucket Deletes a bucket.

    :type handle: object
    :param handle: Object returned from Task Validate
    :type name: string
    :param name: Name of the bucket to be deleted.
    :type region: string
    :param region: AWS Region of the bucket.
    :rtype: Dict with the deleted bucket info.
    """
    # Only pass region_name when the caller supplied a region.
    client_kwargs = {}
    if region is not None:
        client_kwargs['region_name'] = region
    s3_client = handle.client('s3', **client_kwargs)
    return s3_client.delete_bucket(Bucket=name)
================================================
FILE: AWS/legos/aws_delete_classic_load_balancer/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_classic_load_balancer/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_classic_load_balancer/aws_delete_classic_load_balancer.json
================================================
{
"action_title": "AWS Delete Classic Load Balancer",
"action_description": "Delete Classic Elastic Load Balancers",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_classic_load_balancer",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_delete_classic_load_balancer/aws_delete_classic_load_balancer.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Dict
import pprint
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "AWS Delete Classic Load
    # Balancer" Action; both fields are required (Field(...)).
    region: str = Field(..., description='AWS Region.', title='Region')
    elb_name: str = Field(..., description='Name of classic ELB', title='Classic Load Balancer Name')
def aws_delete_classic_load_balancer_printer(output):
    """Pretty-print the Action output; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_delete_classic_load_balancer(handle, region: str, elb_name: str) -> Dict:
    """aws_delete_classic_load_balancer deletes a classic load balancer and returns the response.

    :type handle: object
    :param handle: Object returned from Task Validate.
    :type region: string
    :param region: AWS Region.
    :type elb_name: string
    :param elb_name: Classic load balancer name.
    :rtype: dict with the delete_load_balancer API response.
    """
    elb_client = handle.client('elb', region_name=region)
    # Let AWS errors propagate unchanged: the old "raise Exception(e)"
    # discarded the original exception type and traceback.
    return elb_client.delete_load_balancer(LoadBalancerName=elb_name)
================================================
FILE: AWS/legos/aws_delete_ebs_snapshot/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_ebs_snapshot/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_ebs_snapshot/aws_delete_ebs_snapshot.json
================================================
{
"action_title": "AWS Delete EBS Snapshot",
"action_description": "Delete EBS Snapshot for an EC2 instance",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_ebs_snapshot",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_EBS"]
}
================================================
FILE: AWS/legos/aws_delete_ebs_snapshot/aws_delete_ebs_snapshot.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "AWS Delete EBS Snapshot" Action.
    region: str = Field(
        title='Region',
        description='AWS Region.')
    snapshot_id: str = Field(
        title='Snapshot ID',
        description='EBS snapshot ID. Eg: "snap-34bt4bfjed9d"')
def aws_delete_ebs_snapshot_printer(output):
    """Pretty-print the Action output; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_delete_ebs_snapshot(handle, region: str, snapshot_id: str) -> Dict:
    """aws_delete_ebs_snapshot Returns a dict of deleted snapshot details.

    :type handle: object
    :param handle: Object returned from Task Validate.
    :type region: string
    :param region: AWS Region.
    :type snapshot_id: string
    :param snapshot_id: EBS snapshot ID. Eg: 'snap-34bt4bfjed9d'
    :rtype: Dict with the delete_snapshot API response.
    """
    # Old code pre-initialized "result" as a list despite the Dict return
    # type, and re-raised the caught exception unchanged; both removed.
    ec2_client = handle.client('ec2', region_name=region)
    return ec2_client.delete_snapshot(SnapshotId=snapshot_id)
================================================
FILE: AWS/legos/aws_delete_ecs_cluster/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_ecs_cluster/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_ecs_cluster/aws_delete_ecs_cluster.json
================================================
{
"action_title": "AWS Delete ECS Cluster",
"action_description": "Delete AWS ECS Cluster",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_ecs_cluster",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_delete_ecs_cluster/aws_delete_ecs_cluster.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Dict
import pprint
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "AWS Delete ECS Cluster"
    # Action; both fields are required (Field(...)).
    region: str = Field(..., description='AWS Region.', title='Region')
    cluster_name: str = Field(
        ...,
        description='ECS Cluster name that needs to be deleted',
        title='ECS Cluster Name',
    )
def aws_delete_ecs_cluster_printer(output):
    """Pretty-print the Action output; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_delete_ecs_cluster(handle, region: str, cluster_name: str) -> Dict:
    """aws_delete_ecs_cluster deletes the given ECS cluster.

    :type handle: object
    :param handle: Object returned from Task Validate.
    :type region: string
    :param region: AWS Region.
    :type cluster_name: string
    :param cluster_name: ECS Cluster name
    :rtype: dict with the delete_cluster API response.
    """
    # Fixed a copy-paste docstring ("loadbalancers info") and the misleading
    # "ec2Client" name: this is an ECS client. AWS errors now propagate
    # unchanged instead of being re-wrapped in a bare Exception.
    ecs_client = handle.client('ecs', region_name=region)
    return ecs_client.delete_cluster(cluster=cluster_name)
================================================
FILE: AWS/legos/aws_delete_load_balancer/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_load_balancer/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_load_balancer/aws_delete_load_balancer.json
================================================
{
"action_title": "AWS Delete Load Balancer",
"action_description": "AWS Delete Load Balancer",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_load_balancer",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_delete_load_balancer/aws_delete_load_balancer.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "AWS Delete Load Balancer"
    # Action (ALB/NLB, i.e. the elbv2 API).
    elb_arn: str = Field(
        title='Load Balancer ARN (ALB/NLB type)',
        description='Load Balancer ARN of the ALB/NLB type Load Balancer.'
    )
    region: str = Field(
        title='Region',
        description='AWS Region.'
    )
def aws_delete_load_balancer_printer(output):
    """Pretty-print the Action output; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_delete_load_balancer(handle, region: str, elb_arn: str) -> Dict:
    """aws_delete_load_balancer deletes the given ALB/NLB load balancer.

    :type handle: object
    :param handle: Object returned from Task Validate.
    :type region: string
    :param region: AWS Region.
    :type elb_arn: string
    :param elb_arn: load balancer ARN.
    :rtype: dict with the delete_load_balancer API response.
    """
    elbv2_client = handle.client('elbv2', region_name=region)
    # Let AWS errors propagate unchanged: the old "raise Exception(e)"
    # discarded the original exception type and traceback.
    return elbv2_client.delete_load_balancer(LoadBalancerArn=elb_arn)
================================================
FILE: AWS/legos/aws_delete_log_stream/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_log_stream/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_log_stream/aws_delete_log_stream.json
================================================
{
"action_title": "AWS Delete Log Stream",
"action_description": "AWS Delete Log Stream",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_log_stream",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": ["CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_delete_log_stream/aws_delete_log_stream.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "AWS Delete Log Stream" Action.
    log_group_name: str = Field(
        title='Log Group Name',
        description='Name of the log group.')
    log_stream_name: str = Field(
        title='Log Stream Name',
        description='Name of the log stream.')
    region: str = Field(
        title='Region',
        description='AWS Region')
def aws_delete_log_stream_printer(output):
    """Pretty-print the Action output; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_delete_log_stream(handle, log_group_name: str, log_stream_name: str, region: str) -> Dict:
    """aws_delete_log_stream Deletes a log stream.

    :type handle: object
    :param handle: Object returned from Task Validate.
    :type log_group_name: string
    :param log_group_name: Name of the log group.
    :type log_stream_name: string
    :param log_stream_name: Name of the log stream.
    :type region: string
    :param region: AWS Region.
    :rtype: Dict with the deleted log stream info.
    """
    logs_client = handle.client('logs', region_name=region)
    # Let AWS errors propagate unchanged: the old "raise Exception(e)"
    # discarded the original exception type and traceback.
    return logs_client.delete_log_stream(
        logGroupName=log_group_name,
        logStreamName=log_stream_name)
================================================
FILE: AWS/legos/aws_delete_nat_gateway/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_nat_gateway/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_nat_gateway/aws_delete_nat_gateway.json
================================================
{
"action_title": "AWS Delete NAT Gateway",
"action_description": "AWS Delete NAT Gateway",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_nat_gateway",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_delete_nat_gateway/aws_delete_nat_gateway.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "AWS Delete NAT Gateway" Action.
    nat_gateway_id: str = Field(
        title='NAT Gateway ID',
        description='ID of the NAT Gateway.')
    region: str = Field(
        title='Region',
        description='AWS Region.')
def aws_delete_nat_gateway_printer(output):
    """Pretty-print the Action output; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_delete_nat_gateway(handle, nat_gateway_id: str, region: str) -> Dict:
    """aws_delete_nat_gateway deletes the given NAT gateway.

    :type handle: object
    :param handle: Object returned from Task Validate.
    :type nat_gateway_id: string
    :param nat_gateway_id: ID of the NAT Gateway.
    :type region: string
    :param region: AWS Region.
    :rtype: dict with the delete_nat_gateway API response.
    """
    ec2_client = handle.client('ec2', region_name=region)
    # Let AWS errors propagate unchanged: re-wrapping them in a bare
    # Exception hid the original exception type from callers.
    return ec2_client.delete_nat_gateway(NatGatewayId=nat_gateway_id)
================================================
FILE: AWS/legos/aws_delete_rds_instance/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_rds_instance/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_rds_instance/aws_delete_rds_instance.json
================================================
{
"action_title": "AWS Delete RDS Instance",
"action_description": "Delete AWS RDS Instance",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_rds_instance",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS"]
}
================================================
FILE: AWS/legos/aws_delete_rds_instance/aws_delete_rds_instance.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "AWS Delete RDS Instance"
    # Action; both fields are required (Field(...)).
    instance_id: str = Field(
        ...,
        description=('The DB instance identifier for the DB instance to be deleted. '
        'This parameter isn’t case-sensitive.'),
        title='RDS DB Identifier',
    )
    region: str = Field(
        ..., description='AWS region of instance identifier', title='AWS Region'
    )
def aws_delete_rds_instance_printer(output):
    """Pretty-print the Action output; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_delete_rds_instance(handle, region: str, instance_id: str) -> Dict:
    """aws_delete_rds_instance deletes the given RDS DB instance.

    :type handle: object
    :param handle: Object returned from Task Validate.
    :type region: string
    :param region: AWS Region.
    :type instance_id: string
    :param instance_id: The DB instance identifier for the DB instance to be deleted.
        This parameter isn’t case-sensitive.
    :rtype: dict with the delete_db_instance API response.
    """
    # The misleading "ec2Client" name is fixed (this is an RDS client), and
    # AWS errors now propagate unchanged instead of being re-wrapped.
    rds_client = handle.client('rds', region_name=region)
    return rds_client.delete_db_instance(DBInstanceIdentifier=instance_id)
================================================
FILE: AWS/legos/aws_delete_redshift_cluster/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_redshift_cluster/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_redshift_cluster/aws_delete_redshift_cluster.json
================================================
{
"action_title": "AWS Delete Redshift Cluster",
"action_description": "Delete AWS Redshift Cluster",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_redshift_cluster",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_REDSHIFT"]
}
================================================
FILE: AWS/legos/aws_delete_redshift_cluster/aws_delete_redshift_cluster.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Dict
import pprint
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "AWS Delete Redshift
    # Cluster" Action; region and cluster_identifier are required.
    region: str = Field(..., description='AWS Region.', title='Region')
    cluster_identifier: str = Field(
        ...,
        description='The identifier of the cluster to be deleted.',
        title='Cluster Identifier',
    )
    # Defaults to False, i.e. a final snapshot IS taken before deletion.
    skip_final_cluster_snapshot: Optional[bool] = Field(
        False,
        description='Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true, a final cluster snapshot is not created. If false, a final cluster snapshot is created before the cluster is deleted.',
        title='Skip Final Cluster Snapshot',
    )
def aws_delete_redshift_cluster_printer(output):
    """Pretty-print the Action output; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_delete_redshift_cluster(handle, region: str, cluster_identifier: str, skip_final_cluster_snapshot:bool=False) -> Dict:
    """aws_delete_redshift_cluster deletes the given Redshift cluster.

    :type handle: object
    :param handle: Object returned from Task Validate.
    :type region: string
    :param region: AWS Region.
    :type cluster_identifier: string
    :param cluster_identifier: The identifier of the cluster to be deleted.
    :type skip_final_cluster_snapshot: boolean
    :param skip_final_cluster_snapshot: If True, no final snapshot is taken
        before deletion; if False (default) a final snapshot is created first.
    :rtype: dict with the delete_cluster API response.
    """
    redshift_client = handle.client('redshift', region_name=region)
    # Let AWS errors propagate unchanged: the old "raise Exception(e)"
    # discarded the original exception type and traceback.
    return redshift_client.delete_cluster(
        ClusterIdentifier=cluster_identifier,
        SkipFinalClusterSnapshot=skip_final_cluster_snapshot
    )
================================================
FILE: AWS/legos/aws_delete_route53_health_check/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_route53_health_check/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_route53_health_check/aws_delete_route53_health_check.json
================================================
{
"action_title": "AWS Delete Route 53 HealthCheck",
"action_description": "AWS Delete Route 53 HealthCheck",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_route53_health_check",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": ["CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_delete_route53_health_check/aws_delete_route53_health_check.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pydantic model declaring the input of the "AWS Delete Route 53
    # HealthCheck" Action (Route 53 is global, so no region field).
    health_check_id: str = Field(
        title='Health Check ID',
        description='The ID of the Health Check to delete.')
def aws_delete_route53_health_check_printer(output):
    """Pretty-print the Action output; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_delete_route53_health_check(handle, health_check_id: str) -> Dict:
    """aws_delete_route53_health_check Deletes a Route 53 Health Check.

    :type handle: object
    :param handle: Object returned from Task Validate.
    :type health_check_id: string
    :param health_check_id: The ID of the Health Check to delete.
    :rtype: dict with the delete_health_check API response.
    """
    route53_client = handle.client('route53')
    # Let AWS errors propagate unchanged: re-wrapping them in a bare
    # Exception hid the original exception type from callers.
    return route53_client.delete_health_check(HealthCheckId=health_check_id)
================================================
FILE: AWS/legos/aws_delete_s3_bucket_encryption/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_s3_bucket_encryption/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_s3_bucket_encryption/aws_delete_s3_bucket_encryption.json
================================================
{
"action_title": "Delete AWS Default Encryption for S3 Bucket",
"action_description": "Delete AWS Default Encryption for S3 Bucket",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_s3_bucket_encryption",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3" ]
}
================================================
FILE: AWS/legos/aws_delete_s3_bucket_encryption/aws_delete_s3_bucket_encryption.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "Delete AWS Default
    # Encryption for S3 Bucket" Action.
    region: str = Field(
        title='Region',
        description='AWS Region.')
    bucket_name: str = Field(
        title='Bucket Name',
        description='AWS S3 Bucket Name.')
def aws_delete_s3_bucket_encryption_printer(output):
    """Pretty-print the Action output; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_delete_s3_bucket_encryption(handle, bucket_name: str, region: str) -> Dict:
    """aws_delete_s3_bucket_encryption deletes the default encryption configuration of a bucket.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.
    :type bucket_name: string
    :param bucket_name: Name of the S3 bucket.
    :type region: string
    :param region: location of the bucket
    :rtype: Dict with a 'Response' key (API response) on success, or an
        'Error' key (the caught exception) on failure.
    """
    # Docstring/comment previously said "Puts"/"Setup" encryption; this
    # Action DELETES the bucket's default encryption configuration.
    s3_client = handle.client('s3', region_name=region)
    result = {}
    # Best-effort by design: failures are captured in the result dict
    # instead of raised, so runbooks can inspect the error.
    try:
        result['Response'] = s3_client.delete_bucket_encryption(Bucket=bucket_name)
    except Exception as e:
        result['Error'] = e
    return result
================================================
FILE: AWS/legos/aws_delete_secret/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_secret/__init__.py
================================================
================================================
FILE: AWS/legos/aws_delete_secret/aws_delete_secret.json
================================================
{
"action_title": "AWS Delete Secret",
"action_description": "AWS Delete Secret",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_secret",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_delete_secret/aws_delete_secret.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "AWS Delete Secret" Action.
    secret_name: str = Field(
        title='Secret Name',
        description='Name of the secret to be deleted.')
    region: str = Field(
        title='Region',
        description='AWS Region.')
def aws_delete_secret_printer(output):
    """Pretty-print the Action output; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_delete_secret(handle, region: str, secret_name: str) -> Dict:
    """aws_delete_secret deletes the given Secrets Manager secret.

    :type handle: object
    :param handle: Object returned from Task Validate
    :type region: string
    :param region: AWS Region.
    :type secret_name: string
    :param secret_name: Name of the secret to be deleted.
    :rtype: Dict with the delete_secret API response.
    """
    secrets_client = handle.client('secretsmanager', region_name=region)
    # Let AWS errors propagate unchanged: re-wrapping them in a bare
    # Exception hid the original exception type from callers.
    return secrets_client.delete_secret(SecretId=secret_name)
================================================
FILE: AWS/legos/aws_delete_volume_by_id/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_delete_volume_by_id/__init__.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
================================================
FILE: AWS/legos/aws_delete_volume_by_id/aws_delete_volume_by_id.json
================================================
{
"action_title": "Delete AWS EBS Volume by Volume ID",
"action_description": "Delete AWS Volume by Volume ID",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_delete_volume_by_id",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_is_remediation": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_delete_volume_by_id/aws_delete_volume_by_id.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pydantic model declaring the inputs of the "Delete AWS EBS Volume by
    # Volume ID" Action.
    volume_id: str = Field(
        title='Volume ID',
        description='Volume ID.')
    region: str = Field(
        title='Region',
        description='AWS Region.')
def aws_delete_volume_by_id_printer(output):
    """Pretty-print the Action output wrapped in an "Output" key; None prints nothing."""
    if output is not None:
        pprint.pprint({"Output": output})
def aws_delete_volume_by_id(handle, volume_id: str, region: str) -> list:
    """aws_delete_volume_by_id deletes the given EBS volume.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.
    :type volume_id: string
    :param volume_id: Volume ID needed to delete particular volume.
    :type region: string
    :param region: Used to filter the volume for specific region.
    :rtype: List containing the delete_volume response, or the caught exception.
    """
    # Fixed: the docstring referred to "aws_filter_ebs_unattached_volumes"
    # and the return annotation said "str" although a list is returned.
    ec2_client = handle.client('ec2', region_name=region)
    result = []
    # Best-effort by design: on failure the exception object itself is
    # appended so runbooks can inspect it (pre-existing contract kept).
    try:
        result.append(ec2_client.delete_volume(VolumeId=volume_id))
    except Exception as e:
        result.append(e)
    return result
================================================
FILE: AWS/legos/aws_deregister_instances/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_deregister_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_deregister_instances/aws_deregister_instances.json
================================================
{
"action_title": " Deregisters AWS Instances from a Load Balancer",
"action_description": " Deregisters AWS Instances from a Load Balancer",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_deregister_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELB" ]
}
================================================
FILE: AWS/legos/aws_deregister_instances/aws_deregister_instances.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List, Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for the aws_deregister_instances action."""
    # Name of the Classic ELB to remove instances from.
    elb_name: str = Field(
        title='ELB Name',
        description='Name of the Load Balancer.')
    # EC2 instance IDs that will be deregistered.
    instance_ids: List[str] = Field(
        title='Instance IDs',
        description='List of instance IDs. For eg. ["i-foo", "i-bar"]')
    # AWS region the load balancer lives in.
    region: str = Field(
        title='Region',
        description='AWS Region of the ELB.')
def aws_deregister_instances_printer(output):
    """Pretty-print the deregistration API response, skipping empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_deregister_instances(handle, elb_name: str, instance_ids: List, region: str) -> Dict:
    """aws_deregister_instances deregisters instances from a given Load Balancer.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.

    :type elb_name: string
    :param elb_name: Name of the Load Balancer.

    :type instance_ids: list
    :param instance_ids: List of instance IDs. For eg. ["i-foo", "i-bar"]

    :type region: string
    :param region: AWS Region of the ELB.

    :rtype: dict with the deregistered instance details.
    """
    elb_client = handle.client('elb', region_name=region)
    # Boto3 expects each instance as a {'InstanceId': ...} mapping.
    targets = []
    for one_id in instance_ids:
        targets.append({'InstanceId': one_id})
    return elb_client.deregister_instances_from_load_balancer(
        LoadBalancerName=elb_name,
        Instances=targets
    )
================================================
FILE: AWS/legos/aws_describe_cloudtrail/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_describe_cloudtrail/__init__.py
================================================
================================================
FILE: AWS/legos/aws_describe_cloudtrail/aws_describe_cloudtrail.json
================================================
{
"action_title": "AWS Describe Cloudtrails ",
"action_description": "Given an AWS Region, this Action returns a Dict with all of the Cloudtrail logs being recorded",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_describe_cloudtrail",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS" ]
}
================================================
FILE: AWS/legos/aws_describe_cloudtrail/aws_describe_cloudtrail.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from __future__ import annotations
import pprint
from typing import Dict
from pydantic import BaseModel
class InputSchema(BaseModel):
    """Input parameters for the aws_describe_cloudtrail action."""
    # The entry function aws_describe_cloudtrail(handle, region) requires a
    # region, so declare it here instead of leaving the schema empty.
    region: str
def aws_describe_cloudtrail_printer(output):
    """Pretty-print the CloudTrail description, skipping empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_describe_cloudtrail(handle, region: str) -> Dict:
    """aws_describe_cloudtrail returns information about the CloudTrail trails
    configured in the given region.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.

    :type region: string
    :param region: AWS Region whose CloudTrail trails should be described.

    :rtype: Dict describing the available trails.
    """
    trail_client = handle.client('cloudtrail', region_name=region)
    return trail_client.describe_trails()
================================================
FILE: AWS/legos/aws_detach_ebs_to_instances/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_detach_ebs_to_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_detach_ebs_to_instances/aws_detach_ebs_to_instances.json
================================================
{
"action_title": "Detach an AWS Instance from an Elastic Block Store",
"action_description": "Detach an AWS Instance from an Elastic Block Store.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_detach_ebs_to_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ,"CATEGORY_TYPE_AWS_EBS" ]
}
================================================
FILE: AWS/legos/aws_detach_ebs_to_instances/aws_detach_ebs_to_instances.py
================================================
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for the aws_detach_ebs_to_instances action."""
    # AWS region the volume lives in. ("ESB" typo fixed: this is an EBS volume.)
    region: str = Field(
        title='Region',
        description='AWS Region of the EBS volume.')
    # The EBS volume to detach.
    volume_id: str = Field(
        title='Volume id',
        description='The ID of the volume.')
def aws_detach_ebs_to_instances_printer(output):
    """Pretty-print the detach-volume response, skipping empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_detach_ebs_to_instances(handle, region: str, volume_id: str) -> Dict:
    """aws_detach_ebs_to_instances detaches an Elastic Block Store (EBS) volume
    from the instance it is attached to.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region of the EBS volume.

    :type volume_id: string
    :param volume_id: The ID of the volume.

    :rtype: dict with the detach-volume response.
    """
    ec2Client = handle.client('ec2', region_name=region)
    # The dedicated *_printer renders the result; the stray debug print that
    # used to be here has been removed.
    response = ec2Client.detach_volume(VolumeId=volume_id)
    return response
================================================
FILE: AWS/legos/aws_detach_instances_from_autoscaling_group/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_detach_instances_from_autoscaling_group/__init__.py
================================================
================================================
FILE: AWS/legos/aws_detach_instances_from_autoscaling_group/aws_detach_instances_from_autoscaling_group.json
================================================
{
"action_title": "AWS Detach Instances From AutoScaling Group",
"action_description": "Use This Action to AWS Detach Instances From AutoScaling Group",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_detach_instances_from_autoscaling_group",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_detach_instances_from_autoscaling_group/aws_detach_instances_from_autoscaling_group.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for the aws_detach_instances_from_autoscaling_group action."""
    # A single instance ID; the entry function wraps it in a one-element list,
    # so the old "List of instances" description was wrong for this str field.
    instance_ids: str = Field(
        title='Instance IDs',
        description='ID of the instance to detach. For eg. "i-foo"')
    # Auto Scaling group to detach the instance from.
    group_name: str = Field(
        title='Group Name',
        description='Name of AutoScaling Group.')
    # AWS region of the Auto Scaling group.
    region: str = Field(
        title='Region',
        description='AWS Region of autoscaling group.')
def aws_detach_instances_from_autoscaling_group_printer(output):
    """Pretty-print the detach-instances result, skipping empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_detach_instances_from_autoscaling_group(
        handle,
        instance_ids: str,
        group_name: str,
        region: str
    ) -> Dict:
    """aws_detach_instances_from_autoscaling_group detaches an instance from
    an Auto Scaling group.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type instance_ids: string
    :param instance_ids: ID of the instance to detach.

    :type group_name: string
    :param group_name: Name of AutoScaling Group.

    :type region: string
    :param region: AWS Region of autoscaling group.

    :rtype: Dict with the detach instance info, or with an "error" key on failure.
    """
    # This is an Auto Scaling client (the old name `ec2Client` was misleading).
    asgClient = handle.client("autoscaling", region_name=region)
    result = {}
    try:
        response = asgClient.detach_instances(
            InstanceIds=[instance_ids],
            AutoScalingGroupName=group_name,
            # Shrink desired capacity so the group does not immediately launch
            # a replacement for the detached instance.
            ShouldDecrementDesiredCapacity=True
        )
        result = response
    except Exception as error:
        result["error"] = error
    return result
================================================
FILE: AWS/legos/aws_ebs_modify_volume/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_ebs_modify_volume/__init__.py
================================================
================================================
FILE: AWS/legos/aws_ebs_modify_volume/aws_ebs_modify_volume.json
================================================
{
"action_title": "EBS Modify Volume",
"action_description": "Modify/Resize volume for Elastic Block Storage (EBS).",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_ebs_modify_volume",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_ebs_modify_volume/aws_ebs_modify_volume.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_k8s_enums import SizingOption
from polling2 import poll_decorator
class InputSchema(BaseModel):
    """Input parameters for the aws_ebs_modify_volume action."""
    # The EBS volume whose size will be modified.
    volume_id: str = Field(
        title="EBS Volume ID",
        description="EBS Volume ID to resize."
    )
    # How resize_value is interpreted: Add (absolute GiB increment) or
    # Multiple (scale factor on the current size).
    resize_option: SizingOption = Field(
        title="Resize option",
        description='''
        Option to resize the volume. 2 options supported:
        1. Add - Use this option to resize by an amount.
        2. Multiple - Use this option if you want to resize by a multiple of the current volume size.
        '''
    )
    # The increment (GiB) or the multiplier, depending on resize_option.
    resize_value: int = Field(
        title="Value",
        description='''
        Based on the resize option chosen, specify the value. For eg, if you chose Add option, this
        value will be a value in Gb (like 100). If you chose Multiple option, this value will be a multiplying factor
        to the current volume size. So, if you want to double, you specify 2 here.
        '''
    )
    # AWS region of the volume.
    region: str = Field(
        title="Region",
        description="AWS Region of the volume."
    )
def aws_ebs_modify_volume_printer(output):
    """Pretty-print the modify-volume result, skipping empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_ebs_modify_volume(
    hdl: Session,
    volume_id: str,
    resize_option: SizingOption,
    resize_value: int,
    region: str,
) -> str:
    """aws_ebs_modify_volume modifies the size of the EBS Volume.
    You can either increase it by a provided value or by a provided multiple value.

    :type hdl: Session
    :param hdl: Object returned by the task.validate(...) method.

    :type volume_id: string
    :param volume_id: ebs volume id.

    :type resize_option: SizingOption
    :param resize_option: option to resize the volume, by a fixed amount
        or by a multiple of the existing size.

    :type resize_value: int
    :param resize_value: The value by which the volume should be modified,
        depending upon the resize option.

    :type region: string
    :param region: AWS Region of the volume.

    :rtype: Message with the new volume size.

    :raises ValueError: if resize_option is neither Add nor Multiple.
    :raises Exception: if the volume modification does not complete.
    """
    ec2Client = hdl.client("ec2", region_name=region)
    ec2Resource = hdl.resource("ec2", region_name=region)
    # Get the current volume size.
    Volume = ec2Resource.Volume(volume_id)
    currentSize = Volume.size
    newSize = None
    if resize_option == SizingOption.Add:
        newSize = currentSize + resize_value
    elif resize_option == SizingOption.Multiple:
        newSize = currentSize * resize_value
    else:
        raise ValueError(f"Invalid resize option: {resize_option}")
    print(f'CurrentSize {currentSize}, NewSize {newSize}')
    resp = ec2Client.modify_volume(
        VolumeId=volume_id,
        Size=newSize)
    # modify_volume returns a dict: the status message lives under
    # 'VolumeModification' (the old `resp.StatusMessage` raised AttributeError).
    pprint.pprint(resp.get('VolumeModification', {}).get('StatusMessage'))
    # Wait for the modification to reach a terminal state.
    try:
        check_modification_status(ec2Client, volume_id)
    except Exception as e:
        # `raise <str>` is a TypeError; raise a real exception and keep the cause.
        raise Exception(f'Modify volumeID {volume_id} failed: {str(e)}') from e
    return f'Volume {volume_id} size modified successfully to {newSize}'
@poll_decorator(step=60, timeout=600, check_success=lambda x: x is True)
def check_modification_status(ec2Client, volumeID) -> bool:
    """Poll the EBS volume-modification state for volumeID.

    Returns True once the state is 'completed' (or absent), False while the
    modification is still in progress, and raises if it has failed. The
    poll_decorator retries every 60s for up to 600s until True is returned.
    """
    mods = ec2Client.describe_volumes_modifications(VolumeIds=[volumeID])
    current = mods['VolumesModifications'][0]
    modState = current['ModificationState']
    modProgress = current['Progress']
    print(f'Volume modification state {modState}, Progress {modProgress}')
    if modState == 'failed':
        raise Exception("Get Status Failed")
    return modState in ('completed', None)
================================================
FILE: AWS/legos/aws_ecs_describe_task_definition/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_ecs_detect_failed_deployment/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_ecs_detect_failed_deployment/aws_ecs_detect_failed_deployment.json
================================================
{
"action_title": "ECS detect failed deployment ",
"action_description": "List of stopped tasks, associated with a deployment, along with their stopped reason",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_ecs_detect_failed_deployment",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ECS" ]
}
================================================
FILE: AWS/legos/aws_ecs_detect_failed_deployment/aws_ecs_detect_failed_deployment.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import List
import pprint
class InputSchema(BaseModel):
    """Input parameters for the aws_ecs_detect_failed_deployment action."""
    # ECS cluster that hosts the service.
    cluster_name: str = Field(
        title="Cluster name",
        description="ECS Cluster name"
    )
    # Service whose in-progress deployment is inspected.
    service_name: str = Field(
        title="Service name",
        description="ECS Service name in the specified cluster."
    )
    # AWS region of the ECS service.
    region: str = Field(
        title='Region',
        description='AWS Region of the ECS service.')
def aws_ecs_detect_failed_deployment_printer(output):
    """Pretty-print the failed-deployment report, skipping empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_ecs_detect_failed_deployment(handle, cluster_name: str, service_name: str, region: str) -> List:
    """aws_ecs_detect_failed_deployment lists the tasks stopped by an in-progress
    ECS deployment, along with the reason each task stopped.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type cluster_name: string
    :param cluster_name: ECS Cluster name.

    :type service_name: string
    :param service_name: ECS Service name in the specified cluster.

    :type region: string
    :param region: AWS Region of the ECS service.

    :rtype: List of {"TaskARN", "StoppedReason"} dicts; when there is nothing
        to report, a one-element list with a human-readable message.
    """
    ecsClient = handle.client('ecs', region_name=region)
    try:
        serviceStatus = ecsClient.describe_services(cluster=cluster_name, services=[service_name])
    except Exception as e:
        # Surface the failure both on stdout and in the returned list.
        print(f'Failed to get service status for {service_name}, cluster {cluster_name}, {e}')
        return [f'Failed to get service status for {service_name}, cluster {cluster_name}, {e}']
    # When the deployment is in progress, there will be 2 deployment entries, one PRIMARY and one ACTIVE. PRIMARY will eventually replace
    # ACTIVE, if its successful.
    deployments = serviceStatus.get('services')[0].get('deployments')
    if deployments is None:
        print("Empty deployment")
        return ["Empty deployment"]
    deploymentInProgress = False
    primaryDeploymentID = ""
    for deployment in deployments:
        if deployment['status'] == "PRIMARY":
            primaryDeploymentID = deployment['id']
        else:
            # Any non-PRIMARY entry (e.g. ACTIVE) means a rollout is still underway.
            deploymentInProgress = True
    if deploymentInProgress is False:
        print("No deployment in progress")
        return ["No deployment in progress"]
    # Check if there are any stopped tasks because of this deployment
    stoppedTasks = ecsClient.list_tasks(cluster=cluster_name, startedBy=primaryDeploymentID, desiredStatus="STOPPED").get('taskArns')
    if len(stoppedTasks) == 0:
        print(f'No stopped tasks associated with the deploymentID {primaryDeploymentID}, service {service_name}, cluster {cluster_name}')
        return [f'No stopped tasks associated with the deploymentID {primaryDeploymentID}, service {service_name}, cluster {cluster_name}']
    # Get the reason for the stopped tasks
    taskDetails = ecsClient.describe_tasks(cluster=cluster_name, tasks=stoppedTasks)
    output = []
    for taskDetail in taskDetails.get('tasks'):
        output.append({"TaskARN":taskDetail['taskArn'], "StoppedReason":taskDetail['stoppedReason']})
    return output
================================================
FILE: AWS/legos/aws_ecs_service_restart/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_ecs_service_restart/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_ecs_service_restart/aws_ecs_service_restart.json
================================================
{
"action_title": "Restart AWS ECS Service",
"action_description": "Restart an AWS ECS Service",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_ecs_service_restart",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_BOOL",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ECS" ]
}
================================================
FILE: AWS/legos/aws_ecs_service_restart/aws_ecs_service_restart.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
import pprint
class InputSchema(BaseModel):
    """Input parameters for the aws_ecs_service_restart action."""
    # Full ARN of the ECS cluster.
    cluster_arn: str = Field(
        title='Cluster ARN',
        description='Full ARN of the cluster.'
    )
    # ECS service to force-redeploy.
    service_name: str = Field(
        title='Service Name',
        description='Service name to restart.'
    )
    # The old description wrongly said "cloudwatch"; this is the ECS region.
    region: str = Field(
        title='Region',
        description='AWS Region of the ECS service.')
def aws_ecs_service_restart_printer(output):
    """Pretty-print the restart result, skipping empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_ecs_service_restart(handle, cluster_arn: str, service_name: str, region: str) -> bool:
    """aws_ecs_service_restart forces a new deployment of an ECS service and
    waits for the service to stabilize.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type cluster_arn: string
    :param cluster_arn: Full ARN of the cluster.

    :type service_name: string
    :param service_name: ECS Service name in the specified cluster.

    :type region: string
    :param region: AWS Region of the ECS service.

    :rtype: Returns True if the service was restarted successfully and an exception if not.
    """
    ecsClient = handle.client('ecs', region_name=region)
    # Force a new deployment with the service's current task definition.
    ecsClient.update_service(
        cluster=cluster_arn,
        service=service_name,
        forceNewDeployment=True
    )
    try:
        # The 'services_stable' waiter polls describe_services until stable
        # (by default up to 40 checks).
        waiter = ecsClient.get_waiter('services_stable')
        waiter.wait(
            cluster=cluster_arn,
            services=[service_name]
        )
    except Exception as e:
        # A bare `except:` also swallowed KeyboardInterrupt/SystemExit and
        # discarded the original failure; catch Exception and chain the cause.
        errString = f'"Failed restart service: {service_name} in cluster: {cluster_arn} after 40 checks."'
        print(errString)
        raise Exception(errString) from e
    return True
================================================
FILE: AWS/legos/aws_ecs_update_service/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_copy_pod_logs_to_bucket/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_copy_pod_logs_to_bucket/aws_eks_copy_pod_logs_to_bucket.json
================================================
{
"action_title": "Copy EKS Pod logs to bucket.",
"action_description": "Copy given EKS pod logs to given S3 Bucket.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_copy_pod_logs_to_bucket",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS" ]
}
================================================
FILE: AWS/legos/aws_eks_copy_pod_logs_to_bucket/aws_eks_copy_pod_logs_to_bucket.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
from typing import Dict
import pprint
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_copy_pod_logs_to_bucket action."""
    # EKS cluster hosting the pod.
    clusterName: str = Field(
        title='Cluster Name',
        description='Name of cluster')
    # Kubernetes namespace the pod belongs to.
    namespaceName: str = Field(
        title='namespace Name',
        description='Name of namespace')
    # Pod whose logs will be copied.
    podName: str = Field(
        title='Pod Name',
        description='Name of Pod')
    # Destination S3 bucket for the logs.
    bucketName: str = Field(
        title='S3 Bucket Name',
        description='Name of S3 Bucket')
    # AWS region of the EKS cluster (and the S3 client).
    region: str = Field(
        title='Region',
        description='AWS Region of the cluster')
def aws_eks_copy_pod_logs_to_bucket_printer(output):
    """Pretty-print the copy-logs result on its own paragraph; no-op on None."""
    if output is not None:
        print("\n")
        pprint.pprint(output)
def aws_eks_copy_pod_logs_to_bucket(handle, clusterName: str, namespaceName: str, podName: str, bucketName: str,
                                    region: str) -> Dict:
    """aws_eks_copy_pod_logs_to_bucket copies the logs of an EKS pod into an S3 bucket.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type clusterName: string
    :param clusterName: Cluster name.

    :type namespaceName: string
    :param namespaceName: Cluster Namespace.

    :type podName: string
    :param podName: Pod name.

    :type bucketName: string
    :param bucketName: Name of S3 Bucket.

    :type region: string
    :param region: AWS Region of the EKS cluster.

    :rtype: Dict with the name of the pod and bucket in a success message.
    """
    eks_api = handle.unskript_get_eks_handle(clusterName, region)
    core_v1 = client.CoreV1Api(api_client=eks_api)
    # Read the pod's log stream from the Kubernetes API.
    try:
        pod_logs = core_v1.read_namespaced_pod_log(name=podName,
                                                   namespace=namespaceName)
    except ApiException as e:
        print(f"An Exception occured while reading pod log: {str(e)}")
        raise e
    # Upload the log text as an object under the tests/ prefix.
    s3 = handle.client('s3', region_name=region)
    try:
        s3.put_object(Bucket=bucketName, Key=f"tests/{podName}_pod_logs",
                      Body=pod_logs)
    except Exception as e:
        print(f"Error: {str(e)}")
        raise e
    return {"success": f"Successfully copied {podName} pod logs to {bucketName} bucket."}
================================================
FILE: AWS/legos/aws_eks_delete_pod/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_delete_pod/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_delete_pod/aws_eks_delete_pod.json
================================================
{
"action_title": "Delete EKS POD in a given Namespace",
"action_description": "Delete an EKS POD in a given Namespace",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_delete_pod",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS" ]
}
================================================
FILE: AWS/legos/aws_eks_delete_pod/aws_eks_delete_pod.py
================================================
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
# @author: Yugal Pachpande, @email: yugal.pachpande@unskript.com
##
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
import pprint
from typing import Dict
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_delete_pod action."""
    # EKS cluster hosting the pod.
    clusterName: str = Field(
        title='Cluster Name',
        description='Name of cluster')
    # Kubernetes namespace the pod belongs to.
    namespace: str = Field(
        title='Namespace',
        description='Kubernetes namespace')
    # Pod to delete.
    podname: str = Field(
        title='Podname',
        description='K8S Pod Name')
    # AWS region of the EKS cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the cluster')
def aws_eks_delete_pod_printer(output):
    """Pretty-print deleted-pod details on their own paragraph; no-op on None."""
    if output is not None:
        print("\n")
        pprint.pprint(output)
def aws_eks_delete_pod(handle, clusterName: str, namespace: str, podname: str, region: str) -> Dict:
    """aws_eks_delete_pod deletes a pod from the given EKS cluster namespace.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type clusterName: string
    :param clusterName: Cluster name.

    :type namespace: string
    :param namespace: Cluster Namespace.

    :type podname: string
    :param podname: Name of pod to be deleted.

    :type region: string
    :param region: AWS Region of the EKS cluster.

    :rtype: Details of the deleted pod on success; on an API error, an
        error-message string (preserved from the original best-effort behavior).
    """
    k8shandle = handle.unskript_get_eks_handle(clusterName, region)
    CoreV1Api = client.CoreV1Api(api_client=k8shandle)
    try:
        resp = CoreV1Api.delete_namespaced_pod(
            name=podname, namespace=namespace, pretty=True)
    except ApiException as e:
        # The old message ran the reason straight into the text
        # ("...commandNotFound") and misspelled "occurred"; fix the message.
        resp = 'An Exception occurred while executing the command: ' + e.reason
    return resp
================================================
FILE: AWS/legos/aws_eks_get_all_dead_pods/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_get_all_dead_pods/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_get_all_dead_pods/aws_eks_get_all_dead_pods.json
================================================
{
"action_title": "List of EKS dead pods",
"action_description": "Get list of all dead pods in a given EKS cluster",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_get_all_dead_pods",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS" ]
}
================================================
FILE: AWS/legos/aws_eks_get_all_dead_pods/aws_eks_get_all_dead_pods.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from typing import Optional, List
from kubernetes import client
from kubernetes.client.rest import ApiException
import pandas as pd
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_get_all_dead_pods action."""
    # EKS cluster to scan for dead pods.
    clusterName: str = Field(
        title='Cluster Name',
        description='Name of EKS cluster')
    # Namespace to scan; the default 'all' is treated as a sentinel by the
    # entry function rather than a literal namespace name.
    namespace: Optional[str] = Field(
        'all',
        title='Cluster Namespace',
        description='Cluster Namespace')
    # AWS region of the EKS cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the EKS cluster')
def aws_eks_get_all_dead_pods_printer(output):
    """Render dead-pod records as a pandas table; print a notice when empty."""
    if output is None:
        return
    print("\n")
    if output:
        pprint.pprint(pd.DataFrame(output))
    else:
        print("There are no dead pods in this namespace")
def aws_eks_get_all_dead_pods(handle: Session, clusterName: str, region: str, namespace: str = 'all',) -> List:
    """aws_eks_get_all_dead_pods returns the list of dead (evicted) pods.

    :type handle: Session
    :param handle: Object returned from task.validate(...).

    :type clusterName: string
    :param clusterName: Cluster name.

    :type region: string
    :param region: AWS Region of the EKS cluster.

    :type namespace: string
    :param namespace: Cluster Namespace. The default 'all' scans every namespace.

    :rtype: List of all dead pods in a namespace.
    """
    k8shandle = handle.unskript_get_eks_handle(clusterName, region)
    coreApiClient = client.CoreV1Api(api_client=k8shandle)
    try:
        # 'all' is a sentinel, not a namespace name: the old code passed it
        # literally to list_namespaced_pod, querying a namespace called "all".
        if namespace == 'all':
            res = coreApiClient.list_pod_for_all_namespaces(pretty=True)
        else:
            res = coreApiClient.list_namespaced_pod(
                namespace=namespace, pretty=True)
    except ApiException as e:
        # Re-raise instead of continuing: the old fallback assigned a string
        # to `res` and then crashed on `res.items`.
        pprint.pprint(str(e))
        raise e
    data = []
    for pod in res.items:
        # Pending pods can briefly have no container statuses.
        for container_status in (pod.status.container_statuses or []):
            if container_status.started is False or container_status.ready is False:
                waiting_state = container_status.state.waiting
                # Not-ready containers may be in a non-waiting state.
                if waiting_state is None or waiting_state.reason is None:
                    continue
                status = waiting_state.reason
                if status.lower() in ["evicted"]:
                    data.append({"Pod Ip": pod.status.pod_ip,
                                 "Namespace": pod.metadata.namespace,
                                 "Pod Name": pod.metadata.name,
                                 "Container Name": container_status.name,
                                 "Status": status,
                                 "Start Time": pod.status.start_time,
                                 })
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_columns', None)
    pd.set_option('display.width', None)
    pd.set_option('display.max_colwidth', None)
    return data
================================================
FILE: AWS/legos/aws_eks_get_all_namespaces/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_get_all_namespaces/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_get_all_namespaces/aws_eks_get_all_namespaces.json
================================================
{
"action_title": "List of EKS Namespaces",
"action_description": "Get list of all Namespaces in a given EKS cluster",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_get_all_namespaces",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS"]
}
================================================
FILE: AWS/legos/aws_eks_get_all_namespaces/aws_eks_get_all_namespaces.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
import pandas as pd
from typing import List
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_get_all_namespaces action."""
    # EKS cluster whose namespaces are listed.
    clusterName: str = Field(
        title='Cluster Name',
        description='Name of EKS cluster')
    # AWS region of the EKS cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the EKS cluster')
def aws_eks_get_all_namespaces_printer(output):
    """Render the namespace records as a pandas table; no-op on None."""
    if output is not None:
        print("\n")
        pprint.pprint(pd.DataFrame(output))
def aws_eks_get_all_namespaces(handle: Session, clusterName: str, region: str) -> List:
    """aws_eks_get_all_namespaces returns the list of namespaces in an EKS cluster.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type clusterName: string
    :param clusterName: Cluster name.
    :type region: string
    :param region: AWS Region of the EKS cluster.
    :rtype: List of dicts with Namespace, Status and Start Time for each namespace.
    """
    k8shandle = handle.unskript_get_eks_handle(clusterName, region)
    coreApiClient = client.CoreV1Api(api_client=k8shandle)
    try:
        res = coreApiClient.list_namespace(pretty=True)
    except ApiException as e:
        # Bug fix: the old handler stored an error *string* in `res` and fell
        # through to `res.items`, which raised AttributeError. Return an error
        # entry instead (same shape aws_eks_get_node_cpu_memory uses).
        pprint.pprint(str(e))
        return [{'error': 'An Exception occured while executing the command' + e.reason}]
    data = []
    for ns in res.items:
        data.append({"Namespace": ns.metadata.name,
                     "Status": ns.status.phase,
                     "Start Time": str(ns.metadata.creation_timestamp),
                     })
    # Widen pandas display limits so the companion printer shows full rows.
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_columns', None)
    pd.set_option('display.width', None)
    pd.set_option('display.max_colwidth', None)
    return data
================================================
FILE: AWS/legos/aws_eks_get_all_pods/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_get_all_pods/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_get_all_pods/aws_eks_get_all_pods.json
================================================
{
"action_title": "List of EKS pods",
"action_description": "Get list of all pods in a given EKS cluster",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_get_all_pods",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS"]
}
================================================
FILE: AWS/legos/aws_eks_get_all_pods/aws_eks_get_all_pods.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from typing import Optional, List
from kubernetes import client
from kubernetes.client.rest import ApiException
import pandas as pd
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_get_all_pods action."""
    # Name of the target EKS cluster.
    clusterName: str = Field(
        title='Cluster Name',
        description='Name of EKS cluster')
    # Namespace to list pods from; defaults to the sentinel 'all'.
    namespace: Optional[str] = Field(
        'all',
        title='Cluster Namespace',
        description='Cluster Namespace')
    # AWS region that hosts the cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the EKS cluster')
def aws_eks_get_all_pods_printer(output):
    """Pretty-print the pod listing as a pandas DataFrame; no-op on None."""
    if output is not None:
        print("\n")
        pprint.pprint(pd.DataFrame(output))
def aws_eks_get_all_pods(handle: Session, clusterName: str, region: str, namespace: str = 'all', ) -> List:
    """aws_eks_get_all_pods returns a list of pods with status, IP and start time.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type clusterName: string
    :param clusterName: Cluster name.
    :type namespace: string
    :param namespace: Cluster Namespace; 'all' (the default) lists pods across
        every namespace.
    :type region: string
    :param region: AWS Region of the EKS cluster.
    :rtype: List of pods with status ip and start time.
    """
    k8shandle = handle.unskript_get_eks_handle(clusterName, region)
    coreApiClient = client.CoreV1Api(api_client=k8shandle)
    try:
        # Bug fix: 'all' is not a real namespace, so passing the default value
        # to list_namespaced_pod always failed. Treat 'all' (and empty/None)
        # as "list across every namespace".
        if not namespace or namespace == 'all':
            res = coreApiClient.list_pod_for_all_namespaces(pretty=True)
        else:
            res = coreApiClient.list_namespaced_pod(
                namespace=namespace, pretty=True)
    except ApiException as e:
        # Bug fix: the old handler stored an error string in `res` and fell
        # through to `res.items`, raising AttributeError.
        pprint.pprint(str(e))
        return [{'error': 'An Exception occured while executing the command' + e.reason}]
    data = []
    for pod in res.items:
        data.append({"Pod Ip": pod.status.pod_ip,
                     "Namespace": pod.metadata.namespace,
                     "Name": pod.metadata.name,
                     "Status": pod.status.phase,
                     "Start Time": pod.status.start_time,
                     })
    # Widen pandas display limits so the companion printer shows full rows.
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_columns', None)
    pd.set_option('display.width', None)
    pd.set_option('display.max_colwidth', None)
    return data
================================================
FILE: AWS/legos/aws_eks_get_deployments_name/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_get_deployments_name/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_get_deployments_name/aws_eks_get_deployments_name.json
================================================
{
"action_title": " List of EKS deployment for given Namespace",
"action_description": " Get list of EKS deployment names for given Namespace",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_get_deployments_name",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS" ]
}
================================================
FILE: AWS/legos/aws_eks_get_deployments_name/aws_eks_get_deployments_name.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
## @author: Yugal Pachpande, @email: yugal.pachpande@unskript.com
##
import pprint
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
from typing import List
import pandas as pd
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_get_deployments_name action."""
    # Name of the target EKS cluster.
    clusterName: str = Field(
        title='Cluster Name',
        description='Name of cluster')
    # Namespace whose deployments are listed.
    namespace: str = Field(
        title='Cluster Namespace',
        description='Cluster Namespace')
    # AWS region that hosts the cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the cluster')
def aws_eks_get_deployments_name_printer(output):
    """Pretty-print the deployment listing as a pandas DataFrame; no-op on None."""
    if output is not None:
        print("\n")
        pprint.pprint(pd.DataFrame(output))
def aws_eks_get_deployments_name(handle, clusterName: str, namespace: str, region: str) -> List:
    """aws_eks_get_deployments_name returns deployment summaries for a namespace.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type clusterName: string
    :param clusterName: Cluster name.
    :type namespace: string
    :param namespace: Cluster Namespace.
    :type region: string
    :param region: AWS Region of the EKS cluster.
    :rtype: List of deployments.
    """
    eks_handle = handle.unskript_get_eks_handle(clusterName, region)
    apps_api = client.AppsV1Api(api_client=eks_handle)
    deployments_list = []
    try:
        response = apps_api.list_namespaced_deployment(namespace, pretty=True)
        for dep in response.items:
            dep_status = dep.status
            deployments_list.append({
                "NAME": dep.metadata.name,
                'READY': f"Ready {dep_status.ready_replicas}/{dep_status.available_replicas}",
                'UP-TO-DATE': dep_status.updated_replicas,
                'AVAILABLE': dep_status.available_replicas,
                'START_TIME': dep.metadata.creation_timestamp.strftime("%m/%d/%Y, %H:%M:%S"),
            })
        # Widen pandas display limits so the companion printer shows full rows.
        for display_option in ('display.max_rows', 'display.max_columns',
                               'display.width', 'display.max_colwidth'):
            pd.set_option(display_option, None)
    except ApiException as e:
        return ['An Exception occured while executing the command' + e.reason]
    return deployments_list
================================================
FILE: AWS/legos/aws_eks_get_node_cpu_memory/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_get_node_cpu_memory/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_get_node_cpu_memory/aws_eks_get_node_cpu_memory.json
================================================
{
"action_title": "Get CPU and memory utilization of node.",
"action_description": " Get CPU and memory utilization of given node.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_get_node_cpu_memory",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS"]
}
================================================
FILE: AWS/legos/aws_eks_get_node_cpu_memory/aws_eks_get_node_cpu_memory.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
import pandas as pd
from typing import Optional, List
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_get_node_cpu_memory action."""
    # Name of the target EKS cluster.
    clusterName: str = Field(
        title='Cluster Name',
        description='Name of cluster.')
    # Optional node name; when omitted, every node is reported.
    nodeName: Optional[str] = Field(
        title='Node Name',
        description='Name of node.')
    # AWS region that hosts the cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the cluster.')
def aws_eks_get_node_cpu_memory_printer(output):
    """Pretty-print node capacity data as a pandas DataFrame; no-op on None."""
    if output is not None:
        print("\n")
        pprint.pprint(pd.DataFrame(output))
def aws_eks_get_node_cpu_memory(handle: Session, clusterName: str, region: str, nodeName: str = None) -> List:
    """aws_eks_get_node_cpu_memory returns CPU and memory capacity per node.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type clusterName: string
    :param clusterName: EKS Cluster name.
    :type region: string
    :param region: AWS Region of the EKS cluster.
    :type nodeName: string
    :param nodeName: Name of Node; when omitted, all nodes are reported.
    :rtype: List of nodes with cpu and memory details.
    """
    k8shandle = handle.unskript_get_eks_handle(clusterName, region)
    coreApiClient = client.CoreV1Api(api_client=k8shandle)

    def _node_entry(node_obj):
        # One summary row per node. Capacity memory is reported in Ki
        # (e.g. "16423060Ki"); convert it to Mi for readability.
        capacity = node_obj.status.capacity
        return {"node_name": node_obj.metadata.name,
                # Bug fix: the single-node branch previously ran the "Ki"
                # split/int() conversion on the CPU value too, so the two
                # branches reported CPU inconsistently (int vs raw string).
                # Report the raw capacity string in both.
                "cpu": capacity.get("cpu"),
                "memory": f"{round(int(capacity.get('memory').split('Ki')[0]) / 1024, 2)} Mi"}

    try:
        if nodeName:
            resp = coreApiClient.read_node(nodeName)
            data = [_node_entry(resp)]
        else:
            data = []
            resp = coreApiClient.list_node(pretty=True)
            for node in resp.items:
                data.append(_node_entry(node))
    except ApiException as e:
        pprint.pprint(str(e))
        data = [
            {'error': 'An Exception occured while executing the command' + e.reason}]
    # Widen pandas display limits so the companion printer shows full rows.
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_columns', None)
    pd.set_option('display.width', None)
    pd.set_option('display.max_colwidth', None)
    return data
================================================
FILE: AWS/legos/aws_eks_get_nodes/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_get_nodes/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_get_nodes/aws_eks_get_nodes.json
================================================
{
"action_title": " Get EKS Nodes",
"action_description": " Get EKS Nodes",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_get_nodes",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS"]
}
================================================
FILE: AWS/legos/aws_eks_get_nodes/aws_eks_get_nodes.py
================================================
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
# @author: Yugal Pachpande, @email: yugal.pachpande@unskript.com
##
from pydantic import BaseModel, Field
from kubernetes import client
import datetime
from kubernetes.client.rest import ApiException
from typing import List
from unskript.legos.utils import print_output_in_tabular_format
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_get_nodes action."""
    # Name of the target EKS cluster.
    clusterName: str = Field(
        title='Cluster Name',
        description='Name of cluster')
    # AWS region that hosts the cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the cluster')
def aws_eks_get_nodes_printer(output):
    """Print the node list in tabular form; no-op on None."""
    if output is not None:
        print("\n")
        print(print_output_in_tabular_format(output))
def aws_eks_get_nodes(handle, clusterName: str, region: str) -> List:
    """aws_eks_get_nodes returns the list of all eks nodes.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type clusterName: string
    :param clusterName: Name of the cluster.
    :type region: string
    :param region: AWS Region of the cluster.
    :rtype: List with details of nodes (name, status, age, kubelet version, labels).
    """
    k8shandle = handle.unskript_get_eks_handle(clusterName, region)
    coreApiClient = client.CoreV1Api(api_client=k8shandle)
    try:
        resp = coreApiClient.list_node(pretty=True)
    except ApiException as e:
        # Bug fix: the old handler assigned an error string to `resp` and then
        # re-raised, so the assignment was dead code. Just propagate.
        raise e
    output = []
    for node in resp.items:
        labels = [f"{label}={value}"
                  for label, value in node.metadata.labels.items()]
        # Bug fix: the old loop reported whichever condition happened to be
        # last in the list, regardless of its status. Prefer the condition
        # whose status is "True" (normally "Ready" on a healthy node) and
        # fall back to the last condition type otherwise.
        node_state = ""
        for condition in node.status.conditions:
            node_state = condition.type
            if condition.status == "True":
                break
        output.append(
            {"name": node.metadata.name, "status": node_state,
             # Bug fix: creation_timestamp is UTC; compare against utcnow()
             # rather than local now() so the age is not skewed by timezone.
             "age": f"{(datetime.datetime.utcnow() - node.metadata.creation_timestamp.replace(tzinfo=None)).days}d",
             "version": node.status.node_info.kubelet_version, "labels": ",".join(labels)})
    return output
================================================
FILE: AWS/legos/aws_eks_get_not_running_pods/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_get_not_running_pods/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_get_not_running_pods/aws_eks_get_not_running_pods.json
================================================
{
"action_title": " List of EKS pods not in RUNNING State",
"action_description": " Get list of all pods in a given EKS cluster that are not running.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_get_not_running_pods",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS" ]
}
================================================
FILE: AWS/legos/aws_eks_get_not_running_pods/aws_eks_get_not_running_pods.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
import pandas as pd
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
from typing import List
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_get_not_running_pods action."""
    # Name of the target EKS cluster.
    clusterName: str = Field(
        title='Cluster Name.',
        description='Name of EKS cluster.')
    # AWS region that hosts the cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the EKS cluster.')
def aws_eks_get_not_running_pods_printer(output):
    """Pretty-print the non-running pod list as a DataFrame; no-op on None."""
    if output is not None:
        print("\n")
        pprint.pprint(pd.DataFrame(output))
def aws_eks_get_not_running_pods(handle: Session, clusterName: str, region: str) -> List:
    """aws_eks_get_not_running_pods returns pods that are not in Running phase.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type clusterName: string
    :param clusterName: Cluster name.
    :type region: string
    :param region: AWS Region of the EKS cluster.
    :rtype: List of pods not in running state.
    """
    k8shandle = handle.unskript_get_eks_handle(clusterName, region)
    coreApiClient = client.CoreV1Api(api_client=k8shandle)
    try:
        resp = coreApiClient.list_pod_for_all_namespaces(pretty=True)
    except ApiException as e:
        # Bug fix: the old handler stored an error string in `resp` and fell
        # through to `resp.items`, raising AttributeError.
        pprint.pprint(str(e))
        return [{'error': 'An Exception occured while executing the command' + e.reason}]
    res = []
    for pod in resp.items:
        if pod.status.phase not in ["Running"]:
            res.append({"pod_name": pod.metadata.name, "status": pod.status.phase,
                        "namespace": pod.metadata.namespace,
                        "node_name": pod.spec.node_name})
    # Widen pandas display limits so the companion printer shows full rows.
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_columns', None)
    pd.set_option('display.width', None)
    # Bug fix: -1 for max_colwidth was deprecated and removed in pandas 1.0;
    # None means "no limit" and matches the sibling legos.
    pd.set_option('display.max_colwidth', None)
    return res
================================================
FILE: AWS/legos/aws_eks_get_pod_cpu_memory/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_get_pod_cpu_memory/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_get_pod_cpu_memory/aws_eks_get_pod_cpu_memory.json
================================================
{
"action_title": "Get pod CPU and Memory usage from given namespace",
"action_description": "Get all pod CPU and Memory usage from given namespace",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_get_pod_cpu_memory",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS"]
}
================================================
FILE: AWS/legos/aws_eks_get_pod_cpu_memory/aws_eks_get_pod_cpu_memory.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
import pandas as pd
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
from typing import List
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_get_pod_cpu_memory action."""
    # Name of the target EKS cluster.
    clusterName: str = Field(
        title='Cluster Name',
        description='Name of cluster')
    # Namespace whose pod metrics are fetched.
    namespace: str = Field(
        title='Cluster namespace',
        description='Cluster Namespace')
    # AWS region that hosts the cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the cluster')
def aws_eks_get_pod_cpu_memory_printer(output):
    """Pretty-print per-container usage as a pandas DataFrame; no-op on None."""
    if output is not None:
        print("\n")
        pprint.pprint(pd.DataFrame(output))
def aws_eks_get_pod_cpu_memory(handle, clusterName: str, namespace: str, region: str) -> List:
    """aws_eks_get_pod_cpu_memory returns per-container CPU and memory usage.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type clusterName: string
    :param clusterName: Cluster name.
    :type namespace: string
    :param namespace: Cluster Namespace.
    :type region: string
    :param region: AWS Region of the EKS cluster.
    :rtype: List of pods with cpu and memory usage details.
    :raises ApiException: when the metrics API call fails.
    """
    k8shandle = handle.unskript_get_eks_handle(clusterName, region)
    custom_objects_client = client.CustomObjectsApi(api_client=k8shandle)
    try:
        data = []
        # Pod usage comes from metrics-server via the metrics.k8s.io API group.
        resp = custom_objects_client.list_namespaced_custom_object(group="metrics.k8s.io",
                                                                   version="v1beta1",
                                                                   namespace=namespace,
                                                                   plural="pods")
        for pod in resp.get('items', []):
            for container in pod.get('containers', []):
                data.append({
                    "pod_name": pod['metadata']['name'], "container_name": container.get('name'),
                    "cpu": container['usage']["cpu"],
                    # usage.memory is reported in Ki; convert to Mi for display.
                    "memory": f"{round(int(container['usage']['memory'].split('Ki')[0]) / 1024, 2)} Mi"})
    except ApiException as e:
        # Bug fix: the old handler built an error list here and then
        # immediately re-raised, making the assignment dead code. Log and
        # propagate the failure.
        pprint.pprint(str(e))
        raise e
    # Widen pandas display limits so the companion printer shows full rows.
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_columns', None)
    pd.set_option('display.width', None)
    pd.set_option('display.max_colwidth', None)
    return data
================================================
FILE: AWS/legos/aws_eks_get_pod_status/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_get_pod_status/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_get_pod_status/aws_eks_get_pod_status.json
================================================
{
"action_title": " EKS Get pod status",
"action_description": " Get a Status of given POD in a given Namespace and EKS cluster name",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_get_pod_status",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS" ]
}
================================================
FILE: AWS/legos/aws_eks_get_pod_status/aws_eks_get_pod_status.py
================================================
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
# @author: Yugal Pachpande, @email: yugal.pachpande@unskript.com
##
from typing import Optional, Dict
from pydantic import BaseModel, Field
from kubernetes import client
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
import pprint
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_get_pod_status action."""
    # Name of the target EKS cluster.
    clusterName: str = Field(
        title='Cluster Name',
        description='Name of cluster')
    # Namespace of the pod being inspected.
    namespace: Optional[str] = Field(
        title='Cluster Namespace',
        description='Cluster Namespace')
    # Name of the pod whose status is fetched.
    pod_name: str = Field(
        title='Pod Name',
        description='Name of the pod.')
    # AWS region that hosts the cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the cluster')
def aws_eks_get_pod_status_printer(output):
    """Pretty-print the pod status dict; no-op on None."""
    if output is not None:
        print("\n")
        pprint.pprint(output)
def aws_eks_get_pod_status(handle: Session, clusterName: str, pod_name: str, region: str, namespace: str = None) -> Dict:
    """aws_eks_get_pod_status returns a kubectl-style status summary for one pod.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type clusterName: string
    :param clusterName: Cluster name.
    :type pod_name: string
    :param pod_name: Name of the pod.
    :type namespace: string
    :param namespace: Cluster Namespace.
    :type region: string
    :param region: AWS Region of the EKS cluster.
    :rtype: Dict of pods details with status.
    """
    k8shandle = handle.unskript_get_eks_handle(clusterName, region)
    coreApiClient = client.CoreV1Api(api_client=k8shandle)
    status = coreApiClient.read_namespaced_pod_status(
        namespace=namespace, name=pod_name)
    res = {}
    ready_containers_number = 0
    containers_number = 0
    restarts_number = 0
    # Bug fix: container_statuses is None until the pod's containers exist
    # (e.g. Pending/unscheduled pods); guard so we report 0/0 instead of
    # raising TypeError while iterating None.
    for container in (status.status.container_statuses or []):
        if container.ready:
            ready_containers_number += 1
        if container.restart_count:
            restarts_number = restarts_number + container.restart_count
        containers_number += 1
    res["NAME"] = pod_name
    res['READY'] = f"Ready {ready_containers_number}/{containers_number}"
    res['STATUS'] = status.status.phase
    res['RESTARTS'] = restarts_number
    # Bug fix: start_time is None for pods that have not started yet.
    start_time = status.status.start_time
    res['START_TIME'] = start_time.strftime("%m/%d/%Y, %H:%M:%S") if start_time else None
    return res
================================================
FILE: AWS/legos/aws_eks_get_running_pods/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_get_running_pods/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_get_running_pods/aws_eks_get_running_pods.json
================================================
{
"action_title": " EKS Get Running Pods",
"action_description": " Get a list of running pods from given namespace and EKS cluster name",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_get_running_pods",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS"]
}
================================================
FILE: AWS/legos/aws_eks_get_running_pods/aws_eks_get_running_pods.py
================================================
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
# @author: Yugal Pachpande, @email: yugal.pachpande@unskript.com
##
import pprint
from pydantic import BaseModel, Field
from kubernetes import client
from typing import List
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_get_running_pods action."""
    # Name of the target EKS cluster.
    clusterName: str = Field(
        title='Cluster Name',
        description='Name of cluster')
    # Namespace whose pods are listed.
    namespace: str = Field(
        title='Cluster Namespace',
        description='Cluster Namespace')
    # AWS region that hosts the cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the cluster')
def aws_eks_get_running_pods_printer(output):
    """Pretty-print the healthy pod names; no-op on None."""
    if output is not None:
        print("\n")
        pprint.pprint(output)
def aws_eks_get_running_pods(handle, clusterName: str, namespace: str, region: str) -> List:
    """aws_eks_get_running_pods returns the names of healthy pods in a namespace.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type clusterName: string
    :param clusterName: Cluster name.
    :type namespace: string
    :param namespace: Cluster Namespace.
    :type region: string
    :param region: AWS Region of the EKS cluster.
    :rtype: List of names of pods whose phase is Running or Succeeded.
    """
    eks_handle = handle.unskript_get_eks_handle(clusterName, region)
    core_api = client.CoreV1Api(api_client=eks_handle)
    pod_list = core_api.list_namespaced_pod(namespace=namespace)
    return [pod.metadata.name
            for pod in pod_list.items
            if pod.status.phase in ("Running", "Succeeded")]
================================================
FILE: AWS/legos/aws_eks_run_kubectl_cmd/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_eks_run_kubectl_cmd/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_eks_run_kubectl_cmd/aws_eks_run_kubectl_cmd.json
================================================
{
"action_title": " Run Kubectl commands on EKS Cluster",
"action_description": "This action runs a kubectl command on an AWS EKS Cluster",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_eks_run_kubectl_cmd",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EKS" ]
}
================================================
FILE: AWS/legos/aws_eks_run_kubectl_cmd/aws_eks_run_kubectl_cmd.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
import pprint
class InputSchema(BaseModel):
    """Input parameters for the aws_eks_run_kubectl_cmd action."""
    # Name of the target EKS cluster.
    clusterName: str = Field(
        title='EKS Cluster Name',
        description='Name EKS Cluster')
    # Full kubectl command line to execute against the cluster.
    command: str = Field(
        title='Kubectl Command',
        description='kubectl commands For Eg. kubectl get pods --all-namespaces')
    # AWS region that hosts the cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the cluster')
def aws_eks_run_kubectl_cmd_printer(output):
    """Pretty-print the kubectl command output; no-op on None."""
    if output is not None:
        print("\n")
        pprint.pprint(output)
def aws_eks_run_kubectl_cmd(handle, clusterName: str, command: str, region: str) -> str:
    """aws_eks_run_kubectl_cmd runs a kubectl command on the EKS cluster.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type clusterName: string
    :param clusterName: Cluster name.
    :type command: string
    :param command: Kubectl command to run on EKS Cluster.
    :type region: string
    :param region: AWS Region of the EKS cluster.
    :rtype: string of output of command result, or an error message on failure.
    """
    result = handle.unskript_get_eks_handle(clusterName, region).run_native_cmd(command)
    if result.stderr:
        # Bug fix: the old fixed message threw away the actual kubectl
        # diagnostic; include stderr so the failure is actionable.
        return f"The kubectl command didn't work! {result.stderr}"
    return result.stdout
================================================
FILE: AWS/legos/aws_emr_get_instances/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_emr_get_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_emr_get_instances/aws_emr_get_instances.json
================================================
{
"action_title": "Get AWS EMR Instances",
"action_description": "Get a list of EC2 Instances for an EMR cluster. Filtered by node type (MASTER|CORE|TASK)",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_emr_get_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EMR" ]
}
================================================
FILE: AWS/legos/aws_emr_get_instances/aws_emr_get_instances.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import List
import pprint
class InputSchema(BaseModel):
    """Input parameters for the aws_emr_get_instances action."""
    # EMR cluster identifier, e.g. "j-abcd".
    cluster_id: str = Field(
        title='Cluster Id',
        description='Cluster ID for the EMR cluster. Eg j-abcd')
    # EMR instance group to filter on: MASTER, CORE or TASK.
    instance_group_type: str = Field(
        title='Instance Group Type',
        description='Group type to filter on. Possible values are MASTER|CORE|TASK'
    )
    # AWS region that hosts the cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the cluster')
def aws_emr_get_instances_printer(output):
    """Pretty-print the EMR instance ID list; no-op on None."""
    if output is not None:
        pprint.pprint(output)
def aws_emr_get_instances(
handle,
cluster_id: str,
instance_group_type: str,
region: str) -> List:
"""aws_get_unhealthy_instances returns array of emr instances
:type handle: object
:param handle: Object returned from task.validate(...).
:type cluster_id: string
:param cluster_id: Cluster ID for the EMR cluster.
:type instance_group_type: string
:param instance_group_type: Group type to filter on.
:type region: string
:param region: AWS Region of the cluster
:rtype: Returns array of emr instances
"""
client = handle.client('emr', region_name=region)
response = client.list_instances(
ClusterId=cluster_id,
InstanceGroupTypes=[instance_group_type],
)
if response.get('Instances') is None:
return []
return([x.get('Ec2InstanceId') for x in response.get('Instances')])
================================================
FILE: AWS/legos/aws_execute_cli_command/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_execute_cli_command/__init__.py
================================================
================================================
FILE: AWS/legos/aws_execute_cli_command/aws_execute_cli_command.json
================================================
{
"action_title": "Run Command via AWS CLI",
"action_description": "Execute command using AWS CLI",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_execute_cli_command",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_CLI" ]
}
================================================
FILE: AWS/legos/aws_execute_cli_command/aws_execute_cli_command.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
from pydantic import BaseModel, Field
import pprint
class InputSchema(BaseModel):
    """Inputs for the "Run Command via AWS CLI" action."""
    # Full CLI command line, including the leading "aws" word.
    aws_command: str = Field(
        title='AWS Command',
        description='AWS Command '
        'eg "aws ec2 describe-instances"'
    )
def aws_execute_cli_command_printer(output):
    """Pretty-print the CLI command output; no-op when there is nothing to show."""
    if output is not None:
        pprint.pprint(output)
def aws_execute_cli_command(handle, aws_command: str) -> str:
    """Run an AWS CLI command through the connector handle.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type aws_command: string
    :param aws_command: Full AWS CLI command line, e.g. "aws ec2 describe-instances".
    :rtype: Command stdout on success, empty string on failure.
    """
    result = handle.aws_cli_command(aws_command)
    succeeded = result is not None and result.returncode == 0
    if not succeeded:
        print(
            f"Error while executing command ({aws_command}): {result}")
        return str()
    return result.stdout
================================================
FILE: AWS/legos/aws_execute_command_ssm/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_execute_command_ssm/__init__.py
================================================
================================================
FILE: AWS/legos/aws_execute_command_ssm/aws_execute_command_ssm.json
================================================
{
"action_title": " Run Command via SSM",
"action_description": " Execute command on EC2 instance(s) using SSM",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_execute_command_ssm",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_SSM" ]
}
================================================
FILE: AWS/legos/aws_execute_command_ssm/aws_execute_command_ssm.py
================================================
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import List, Dict
import pprint
import time
class InputSchema(BaseModel):
    """Inputs for the "Run Command via SSM" action."""
    # EC2 instances that receive the SSM command.
    instance_ids: List[str] = Field(
        title='Instance IDs',
        description='List of instance IDs. For eg. ["i-foo", "i-bar"]')
    # SSM document to execute; AWS-RunPowerShellScript is the default.
    document_name: str = Field(
        'AWS-RunPowerShellScript',
        title='Document Name',
        description='Name of the SSM document to run.')
    # Fixed: the title previously read 'SSM Document Name', duplicating the
    # document_name field's meaning; this field actually carries the commands.
    parameters: List[str] = Field(
        title='Commands',
        description='List of commands to execute on instance. For eg. ["ifconfig", "pwd"]')
    region: str = Field(
        title='Region',
        description='AWS Region of the AWS Instance.')
def aws_execute_command_ssm_printer(output):
    """Pretty-print per-instance SSM results, preceded by a blank line; no-op for None."""
    if output is not None:
        print("\n")
        pprint.pprint(output)
def aws_execute_command_ssm(handle, instance_ids: list, parameters: list, region: str,
                            document_name: str = "AWS-RunPowerShellScript") -> Dict:
    """aws_execute_command_ssm runs an SSM document on EC2 instances and collects results.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type instance_ids: list
    :param instance_ids: List of instance IDs. For eg. ["i-foo", "i-bar"]
    :type parameters: list
    :param parameters: List of commands to execute on instance. For eg. ["ifconfig", "pwd"]
    :type region: string
    :param region: AWS Region of the AWS Instance.
    :type document_name: string
    :param document_name: Document Name.
    :rtype: Dict mapping instance ID to its command invocation response (None when
            the invocation could not be fetched before the polling deadline).
    """
    ssm_client = handle.client('ssm', region_name=region)
    response = ssm_client.send_command(
        InstanceIds=instance_ids,
        DocumentName=document_name,
        Parameters={
            'commands': parameters
        })
    command_id = response['Command']['CommandId']
    # States after which SSM will not update the invocation any further.
    terminal_states = {'Success', 'Cancelled', 'TimedOut', 'Failed'}
    output = {}
    for instance_id in instance_ids:
        # Previously a single fixed 2s sleep preceded one fetch, which could
        # return an in-flight invocation or raise InvocationDoesNotExist right
        # after send_command. Poll until a terminal state or the deadline.
        res = None
        deadline = time.time() + 120
        while True:
            try:
                res = ssm_client.get_command_invocation(
                    CommandId=command_id,
                    InstanceId=instance_id,
                )
            except Exception:
                # Invocation may not be registered yet; retry until deadline.
                res = None
            if res is not None and res.get('Status') in terminal_states:
                break
            if time.time() >= deadline:
                break
            time.sleep(2)
        output[instance_id] = res
    return output
================================================
FILE: AWS/legos/aws_filter_all_manual_database_snapshots/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_all_manual_database_snapshots/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_all_manual_database_snapshots/aws_filter_all_manual_database_snapshots.json
================================================
{
"action_title": "AWS Filter All Manual Database Snapshots",
"action_description": "Use this Action to filter all manual AWS database snapshots",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_all_manual_database_snapshots",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_is_check":false,
"action_verbs": ["filter"],
"action_nouns": ["aws","database","snapshots","manual"],
"action_next_hop":[],
"action_next_hop_parameter_mapping":{},
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2","CATEGORY_TYPE_DB" ]
}
================================================
FILE: AWS/legos/aws_filter_all_manual_database_snapshots/aws_filter_all_manual_database_snapshots.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List, Dict
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
import pprint
class InputSchema(BaseModel):
    """Inputs for the "AWS Filter All Manual Database Snapshots" action."""
    # Region whose RDS snapshots are listed.
    region: str = Field(
        title='Region',
        description='AWS Region of database.')
def aws_filter_all_manual_database_snapshots_printer(output):
    """Pretty-print the manual snapshot identifiers; no-op for None."""
    if output is not None:
        pprint.pprint(output)
def aws_filter_all_manual_database_snapshots(handle, region: str) -> List:
    """aws_filter_all_manual_database_snapshots lists all manual RDS database snapshots.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: Region for database.
    :rtype: List of manual database snapshot identifiers.
    """
    # This is an RDS client, not EC2 as the old local name suggested.
    rds_client = handle.client('rds', region_name=region)
    result = []
    try:
        response = aws_get_paginator(rds_client, "describe_db_snapshots", "DBSnapshots",
                                     SnapshotType='manual')
        for snapshot in response:
            result.append(snapshot['DBSnapshotIdentifier'])
    except Exception as error:
        # Best-effort: keep whatever was collected, but surface the failure
        # instead of silently swallowing it as before.
        print(f"Error while listing manual DB snapshots in {region}: {error}")
    return result
================================================
FILE: AWS/legos/aws_filter_ebs_unattached_volumes/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_ebs_unattached_volumes/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_ebs_unattached_volumes/aws_filter_ebs_unattached_volumes.json
================================================
{
"action_title": "Filter AWS Unattached EBS Volume",
"action_description": "Filter AWS Unattached EBS Volume",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_ebs_unattached_volumes",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check":true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_next_hop": ["da23633be34037f023e1c1f56220ec75eb2729d7d8eb2bca9badec15ed0fd2ca"],
"action_next_hop_parameter_mapping": {"da23633be34037f023e1c1f56220ec75eb2729d7d8eb2bca9badec15ed0fd2ca": {"name": "Delete Unattached AWS EBS Volumes", "region":".[0].region","ebs_volume":"map(.volume_id)"}},
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2","CATEGORY_TYPE_AWS_EBC" ]
}
================================================
FILE: AWS/legos/aws_filter_ebs_unattached_volumes/aws_filter_ebs_unattached_volumes.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
import pprint
class InputSchema(BaseModel):
    """Inputs for the "Filter AWS Unattached EBS Volume" action."""
    # Empty string means: scan every region returned by aws_list_all_regions.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region.')
def aws_filter_ebs_unattached_volumes_printer(output):
    """Pretty-print the (status, volumes) tuple; no-op for None."""
    if output is not None:
        pprint.pprint(output)
def aws_filter_ebs_unattached_volumes(handle, region: str = "") -> Tuple:
    """aws_filter_ebs_unattached_volumes Returns EBS volumes that have no attachments.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: Used to filter the volume for specific region.
    :rtype: Tuple with status result and list of EBS Unattached Volume.
    """
    unattached = []
    regions_to_scan = aws_list_all_regions(handle) if not region else [region]
    for current_region in regions_to_scan:
        try:
            ec2_resource = handle.resource('ec2', region_name=current_region)
            # A volume with zero attachments is not in use by any instance.
            for vol in ec2_resource.volumes.all():
                if not vol.attachments:
                    unattached.append({"region": current_region, "volume_id": vol.id})
        except Exception:
            # Best-effort multi-region scan: skip regions we cannot query.
            pass
    if unattached:
        return (False, unattached)
    return (True, None)
================================================
FILE: AWS/legos/aws_filter_ebs_volumes_with_low_iops/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_ebs_volumes_with_low_iops/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_ebs_volumes_with_low_iops/aws_filter_ebs_volumes_with_low_iops.json
================================================
{
"action_title": "Filter AWS EBS Volume with Low IOPS",
"action_description": "IOPS (Input/Output Operations Per Second) is a metric used to measure the amount of input/output operations that an EBS volume can perform per second.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_ebs_volumes_with_low_iops",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check":true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS"],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_filter_ebs_volumes_with_low_iops/aws_filter_ebs_volumes_with_low_iops.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
import pprint
class InputSchema(BaseModel):
    """Inputs for the "Filter AWS EBS Volume with Low IOPS" action."""
    # Empty string means: scan every region returned by aws_list_all_regions.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region.')
    # Volumes whose provisioned IOPS falls below this value are reported.
    iops_threshold: Optional[int] = Field(
        default=100,
        title="IOPS's Threshold",
        description="IOPS's Threshold is a metric used to measure the amount of input/output operations that an EBS volume can perform per second.")
def aws_filter_ebs_volumes_with_low_iops_printer(output):
    """Pretty-print the (status, volumes) tuple; no-op for None."""
    if output is not None:
        pprint.pprint(output)
def aws_filter_ebs_volumes_with_low_iops(handle, region: str = "", iops_threshold: int = 100) -> Tuple:
    """aws_filter_ebs_volumes_with_low_iops Returns EBS volumes whose IOPS is below a threshold.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: Used to filter the volume for specific region.
    :type iops_threshold: int
    :param iops_threshold: IOPS's Threshold is a metric used to measure the amount of input/output operations that an EBS volume can perform per second.
    :rtype: Tuple with status result and list of low IOPS EBS Volumes.
    """
    result = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            # Filtering the volume by region
            ec2Client = handle.resource('ec2', region_name=reg)
            volumes = ec2Client.volumes.all()
            # collecting the volumes which have low IOPS
            for volume in volumes:
                # Fixed: some volume types report iops as None; comparing
                # None < int raised TypeError and the blanket except silently
                # aborted the rest of the region's scan. Skip them explicitly.
                if volume.iops is None:
                    continue
                if volume.iops < iops_threshold:
                    result.append({
                        "region": reg,
                        "volume_id": volume.id,
                        "volume_iops": volume.iops,
                    })
        except Exception:
            # Best-effort multi-region scan: skip regions we cannot query.
            pass
    if len(result) != 0:
        return (False, result)
    else:
        return (True, None)
================================================
FILE: AWS/legos/aws_filter_ec2_by_tags/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_ec2_by_tags/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_ec2_by_tags/aws_filter_ec2_by_tags.json
================================================
{
"action_title": "Filter AWS EC2 Instance",
"action_description": "Filter AWS EC2 Instance",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_ec2_by_tags",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_filter_ec2_by_tags/aws_filter_ec2_by_tags.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import List
from unskript.connectors.aws import aws_get_paginator
import pprint
from beartype import beartype
class InputSchema(BaseModel):
    """Inputs for the "Filter AWS EC2 Instance" (by tags) action."""
    # Tag key to match on each instance.
    tag_key: str = Field(
        title='Tag Key',
        description='The key of the tag.')
    # Tag value that must accompany tag_key.
    tag_value: str = Field(
        title='Tag Value',
        description='The value of the key.')
    region: str = Field(
        title='Region',
        description='AWS Region.')
@beartype
def aws_filter_ec2_by_tags_printer(output):
    """Pretty-print the matched instance IDs; no-op for None."""
    if output is not None:
        pprint.pprint({"Instances": output})
@beartype
def aws_filter_ec2_by_tags(handle, tag_key: str, tag_value: str, region: str) -> List:
    """aws_filter_ec2_by_tags Returns an array of instances matching tags.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.
    :type tag_key: string
    :param tag_key: Key for the EC2 instance tag.
    :type tag_value: string
    :param tag_value: value for the EC2 instance tag.
    :type region: string
    :param region: EC2 instance region.
    :rtype: Array of instances matching tags.
    """
    ec2_client = handle.client('ec2', region_name=region)
    tag_filter = {'Name': 'tag:' + tag_key, 'Values': [tag_value]}
    reservations = aws_get_paginator(ec2_client, "describe_instances", "Reservations",
                                     Filters=[tag_filter])
    return [
        instance['InstanceId']
        for reservation in reservations
        for instance in reservation['Instances']
    ]
================================================
FILE: AWS/legos/aws_filter_ec2_by_vpc/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_ec2_by_vpc/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_ec2_by_vpc/aws_filter_ec2_by_vpc.json
================================================
{
"action_title": "Filter AWS EC2 instance by VPC Ids",
"action_description": "Use this Action to Filter AWS EC2 Instance by VPC Ids",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_ec2_by_vpc",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2","CATEGORY_TYPE_AWS_VPC" ]
}
================================================
FILE: AWS/legos/aws_filter_ec2_by_vpc/aws_filter_ec2_by_vpc.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import List
from unskript.connectors.aws import aws_get_paginator
import pprint
class InputSchema(BaseModel):
    """Inputs for the "Filter AWS EC2 instance by VPC Ids" action."""
    # VPC whose instances are listed.
    vpc_id: str = Field(
        title='VPC Id',
        description='VPC ID of the instances.')
    region: str = Field(
        title='Region',
        description='AWS Region.')
def aws_filter_ec2_by_vpc_printer(output):
    """Pretty-print the instance IDs found in the VPC; no-op for None."""
    if output is not None:
        pprint.pprint({"Instances": output})
def aws_filter_ec2_by_vpc(handle, vpc_id: str, region: str) -> List:
    """aws_filter_ec2_by_vpc Returns an array of instances matching the vpc id.

    :type handle: object
    :param handle: Object containing global params for the notebook.
    :type vpc_id: string
    :param vpc_id: VPC ID of the instances.
    :type region: string
    :param region: AWS Region.
    :rtype: Array of the instances matching the vpc id.
    """
    ec2_client = handle.client('ec2', region_name=region)
    vpc_filter = [{'Name': 'vpc-id', 'Values': [vpc_id]}]
    reservations = aws_get_paginator(ec2_client, "describe_instances", "Reservations",
                                     Filters=vpc_filter)
    return [
        instance['InstanceId']
        for reservation in reservations
        for instance in reservation['Instances']
    ]
================================================
FILE: AWS/legos/aws_filter_ec2_instances/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://unskript.com)
================================================
FILE: AWS/legos/aws_filter_ec2_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_ec2_instances/aws_filter_ec2_instances.json
================================================
{
"action_title": "Filter All AWS EC2 Instance",
"action_description": "Filter All AWS EC2 Instance",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_ec2_instances",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_filter_ec2_instances/aws_filter_ec2_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import List
from unskript.connectors.aws import aws_get_paginator
import pprint
from beartype import beartype
class InputSchema(BaseModel):
    """Inputs for the "Filter All AWS EC2 Instance" action."""
    # Region whose instances are listed.
    region: str = Field(
        title='Region',
        description='AWS Region.')
@beartype
def aws_filter_ec2_instances_printer(output):
    """Pretty-print the listed instance IDs; no-op for None."""
    if output is not None:
        pprint.pprint({"Instances": output})
@beartype
def aws_filter_ec2_instances(handle, region: str) -> List:
    """aws_filter_ec2_instances Returns an array of all instance IDs in a region.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.
    :type region: string
    :param region: AWS Region to list instances from.
    :rtype: Array of instances.
    """
    ec2_client = handle.client('ec2', region_name=region)
    reservations = aws_get_paginator(ec2_client, "describe_instances", "Reservations")
    return [
        instance['InstanceId']
        for reservation in reservations
        for instance in reservation['Instances']
    ]
================================================
FILE: AWS/legos/aws_filter_ec2_without_lifetime_tag/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_instances_without_termination_and_lifetime_tag/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_instances_without_termination_and_lifetime_tag/aws_filter_instances_without_termination_and_lifetime_tag.json
================================================
{
"action_title": "Filter AWS EC2 Instances Without Termination and Lifetime Tag",
"action_description": "Filter AWS EC2 Instances Without Termination and Lifetime Tag and Check if they are valid",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_instances_without_termination_and_lifetime_tag",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["filter"],
"action_nouns": ["aws","instances","without","termination","lifetime","tag"],
"action_is_check":true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_COST_OPT","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ],
"action_next_hop":["29ce1935204c64d816fd1f01f4fe41e8d8bd47725b899535c6acee703a7bcf0d"],
"action_next_hop_parameter_mapping":{"29ce1935204c64d816fd1f01f4fe41e8d8bd47725b899535c6acee703a7bcf0d": {"name": "Terminate EC2 Instances Without Valid Lifetime Tags", "region": ".[0].region", "instance_ids":".[0].instances"}}
}
================================================
FILE: AWS/legos/aws_filter_instances_without_termination_and_lifetime_tag/aws_filter_instances_without_termination_and_lifetime_tag.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import List, Tuple, Optional
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
import pprint
from datetime import datetime, date
class InputSchema(BaseModel):
    """Inputs for the "Filter AWS EC2 Instances Without Termination and Lifetime Tag" action."""
    # Empty string means: scan every region returned by aws_list_all_regions.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='Name of the AWS Region'
    )
    # Tag key whose value holds the termination date (dd-mm-YYYY).
    termination_tag_name: Optional[str] = Field(
        default="terminationDateTag",
        title='Termination Date Tag Name',
        description='Name of the Termination Date Tag given to an EC2 instance. By default "terminationDateTag" is considered '
    )
    # Tag key whose value holds the lifetime; 'INDEFINITE' exempts an instance.
    lifetime_tag_name: Optional[str] = Field(
        default="lifetimeTag",
        title='Lifetime Tag Name',
        description='Name of the Lifetime Date Tag given to an EC2 instance. By default "lifetimeTag" is considered '
    )
def aws_filter_instances_without_termination_and_lifetime_tag_printer(output):
    """Pretty-print the (status, matches) tuple; no-op for None."""
    if output is not None:
        pprint.pprint(output)
def fetch_instances_from_valid_region(reservations, aws_region, termination_tag_name, lifetime_tag_name):
    """Collect IDs of instances whose termination/lifetime tags are missing or expired.

    :param reservations: "Reservations" entries as returned by describe_instances.
    :param aws_region: Region name recorded alongside the matched instance IDs.
    :param termination_tag_name: Tag key holding the termination date (dd-mm-YYYY).
    :param lifetime_tag_name: Tag key holding the lifetime value; 'INDEFINITE' exempts.
    :return: {'region': ..., 'instances': [...]} or {} when nothing matched.
    """
    result = []
    right_now = date.today()

    def _flag(instance_id):
        # Fixed: an instance failing both the termination and the lifetime
        # check used to be appended twice; keep each ID at most once.
        if instance_id and instance_id not in result:
            result.append(instance_id)

    for reservation in reservations:
        for instance in reservation.get('Instances', []):
            instance_id = instance.get('InstanceId')
            tagged_instance = instance.get('Tags', [])
            tag_keys = {tag['Key'] for tag in tagged_instance}
            tag_values = {tag['Key']: tag['Value'] for tag in tagged_instance}
            if not (termination_tag_name in tag_keys and lifetime_tag_name in tag_keys):
                _flag(instance_id)
                continue  # Skip to next instance if tags not found
            try:
                termination_date = datetime.strptime(tag_values.get(termination_tag_name, ''), '%d-%m-%Y').date()
                if termination_date < right_now:
                    _flag(instance_id)
                lifetime_value = tag_values.get(lifetime_tag_name)
                # NOTE(review): any non-INDEFINITE instance launched before today
                # is flagged; the lifetime duration value itself is never
                # compared -- confirm this matches the intended policy.
                launch_date = datetime.strptime(instance.get('LaunchTime').strftime("%d-%m-%Y"), '%d-%m-%Y').date()
                if lifetime_value != 'INDEFINITE' and launch_date < right_now:
                    _flag(instance_id)
            except Exception as e:
                _flag(instance_id)
                print(f"Error processing instance {instance_id}: {e}")
    return {'region': aws_region, 'instances': result} if result else {}
def aws_filter_instances_without_termination_and_lifetime_tag(handle, region: str=None, termination_tag_name:str='terminationDateTag', lifetime_tag_name:str='lifetimeTag') -> Tuple:
    """aws_filter_instances_without_termination_and_lifetime_tag finds instances lacking valid tags.

    Assumed tag key format - terminationDateTag, lifetimeTag
    Assumed Date format for both keys is -> dd-mm-yy

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: Optional, Name of AWS Region
    :type termination_tag_name: string
    :param termination_tag_name: Optional, Name of the Termination Date Tag given to an EC2 instance. By default "terminationDateTag" is considered
    :type lifetime_tag_name: string
    :param lifetime_tag_name: Optional, Name of the Lifetime Date Tag given to an EC2 instance. By default "lifetimeTag" is considered
    :rtype: Tuple of status and list of per-region dicts of flagged instance IDs
    """
    final_list = []
    all_regions = [region] if region else aws_list_all_regions(handle=handle)
    for aws_region in all_regions:
        try:
            ec2_client = handle.client('ec2', region_name=aws_region)
            reservations = aws_get_paginator(ec2_client, "describe_instances", "Reservations")
            region_hits = fetch_instances_from_valid_region(
                reservations, aws_region, termination_tag_name, lifetime_tag_name)
            if region_hits:
                final_list.append(region_hits)
        except Exception:
            # Best-effort: regions that cannot be queried are skipped.
            pass
    return (False, final_list) if final_list else (True, None)
================================================
FILE: AWS/legos/aws_filter_large_ec2_instances/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://unskript.com)
================================================
FILE: AWS/legos/aws_filter_large_ec2_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_large_ec2_instances/aws_filter_large_ec2_instances.json
================================================
{
"action_title": "AWS Filter Large EC2 Instances",
"action_description": "This Action filters all instances whose instanceType contains Large or xLarge, and that DO NOT have the given tag key/value.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_large_ec2_instances",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_filter_large_ec2_instances/aws_filter_large_ec2_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import List
from unskript.connectors.aws import aws_get_paginator
import pprint
class InputSchema(BaseModel):
    """Inputs for the "AWS Filter Large EC2 Instances" action."""
    region: str = Field(
        title='Region',
        description='AWS Region of the ECS service.')
    # Instances carrying this key/value pair are excluded from the result.
    tag_key: str = Field(
        title='Tag Key',
        description='The key for the EC2 instance tag.')
    tag_value: str = Field(
        title='Tag Value',
        description='The value for the EC2 instance tag.')
def aws_filter_large_ec2_instances_printer(output):
    """Pretty-print the matched instance IDs; no-op for None."""
    if output is not None:
        pprint.pprint({"Instances": output})
def aws_filter_large_ec2_instances(handle, tag_key: str, tag_value: str, region: str) -> List:
    """aws_filter_large_ec2_instances Returns *large-type instances missing the given tag.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.
    :type tag_key: string
    :param tag_key: The key for the EC2 instance tag.
    :type tag_value: string
    :param tag_value: The value for the EC2 instance tag.
    :type region: string
    :param region: EC2 instance region.
    :rtype: Array of instance IDs with large instance type that lack the tag.
    """
    result = []
    try:
        ec2Client = handle.client('ec2', region_name=region)
        res = aws_get_paginator(ec2Client, "describe_instances", "Reservations",
                                Filters=[{'Name': 'instance-type', 'Values': ['*large']}])
        for reservation in res:
            for instance in reservation['Instances']:
                # Fixed: untagged instances have no "Tags" key at all, which
                # previously raised KeyError and aborted the scan; treat a
                # missing key as "no tags".
                tags = instance.get("Tags", [])
                if not any(tag['Key'] == tag_key and tag['Value'] == tag_value for tag in tags):
                    result.append(instance['InstanceId'])
    except Exception as e:
        # Fixed: the exception object itself used to be appended into the
        # result list of instance IDs; report it instead of polluting output.
        print(f"Error while filtering large EC2 instances in {region}: {e}")
    return result
================================================
FILE: AWS/legos/aws_filter_long_running_instances/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_long_running_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_long_running_instances/aws_filter_long_running_instances.json
================================================
{
"action_title": "AWS Find Long Running EC2 Instances",
"action_description": "This action lists all instances that are older than the threshold",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_long_running_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_filter_long_running_instances/aws_filter_long_running_instances.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import List
from unskript.connectors.aws import aws_get_paginator
import pprint
from datetime import datetime, timedelta
import pytz
from beartype import beartype
class InputSchema(BaseModel):
    """Inputs for the "AWS Find Long Running EC2 Instances" action."""
    # Instances older than this many days are reported.
    threshold: int = Field(
        default=30,
        title="Threshold (in day's)",
        description="(in day's) The threshold to check the instances older than the threshold.")
    region: str = Field(
        title='Region',
        description='AWS Region')
def aws_filter_long_running_instances_printer(output):
    """Pretty-print the long-running instance IDs; no-op for None."""
    if output is not None:
        pprint.pprint({"Instances": output})
def aws_filter_long_running_instances(handle, region: str, threshold: int = 10) -> List:
    """aws_filter_long_running_instances Returns an array of long running EC2 instances.

    NOTE(review): the default threshold here is 10 days while the InputSchema
    above declares a default of 30 -- confirm which value is intended.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.
    :type region: string
    :param region: EC2 instance region.
    :type threshold: int
    :param threshold: (in days) The threshold to check the instances older than the threshold.
    :rtype: Array of long running EC2 instances.
    """
    long_running = []
    now_utc = datetime.now(pytz.UTC)
    max_age = timedelta(days=int(threshold))
    ec2_client = handle.client('ec2', region_name=region)
    for reservation in aws_get_paginator(ec2_client, "describe_instances", "Reservations"):
        for instance in reservation['Instances']:
            if now_utc - instance["LaunchTime"] > max_age:
                long_running.append(instance['InstanceId'])
    return long_running
================================================
FILE: AWS/legos/aws_filter_old_ebs_snapshots/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_old_ebs_snapshots/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_old_ebs_snapshots/aws_filter_old_ebs_snapshots.json
================================================
{
"action_title": "AWS Filter Old EBS Snapshots",
"action_description": "This action lists all snapshot details that are older than the threshold",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_old_ebs_snapshots",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_poll": true,
"action_next_hop": ["303d6481e8cfa508d9ba11f847906c7d46f30a1c70f9b6b0e04b12409e74f704"],
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EBS" ],
"action_next_hop_parameter_mapping":{"303d6481e8cfa508d9ba11f847906c7d46f30a1c70f9b6b0e04b12409e74f704": {"name": "Delete Old EBS Snapshots", "region":".[0].region","snapshot_ids":"map(.snapshot_id)"}}
}
================================================
FILE: AWS/legos/aws_filter_old_ebs_snapshots/aws_filter_old_ebs_snapshots.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import List, Optional, Tuple
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
import pprint
from datetime import datetime, timedelta
import pytz
class InputSchema(BaseModel):
    # Optional AWS region; when empty, the action scans every region.
    region: Optional[str] = Field(
        title='Region',
        description='AWS Region.')
    # Age threshold in days; snapshots started earlier than this are reported.
    threshold: Optional[int] = Field(
        default=30,
        title="Threshold (in days)",
        description="(in day's) The threshold to check the snapshots older than the threshold.")
def aws_filter_old_ebs_snapshots_printer(output):
    """Pretty-print the action result; a None output (no findings) prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_filter_old_ebs_snapshots(handle, region: str="", threshold: int = 30) -> Tuple:
    """aws_filter_old_ebs_snapshots returns EBS snapshots older than the threshold.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region. When empty, every region is scanned.

    :type threshold: int
    :param threshold: (in days) Snapshots whose start time is older than this are reported.

    :rtype: Tuple of (status, list of {"region", "snapshot_id"} dicts);
            (True, None) when no old snapshot is found.
    """
    # Stdlib replacement for pytz.UTC; snapshot.start_time is timezone-aware,
    # so the reference time must be timezone-aware as well.
    from datetime import timezone

    result = []
    all_regions = [region] if region else aws_list_all_regions(handle)
    # Hoisted out of the region loop: the reference time is loop-invariant.
    current_time = datetime.now(timezone.utc)
    max_age = timedelta(days=int(threshold))
    for reg in all_regions:
        try:
            ec2Resource = handle.resource('ec2', region_name=reg)
            # Only consider snapshots owned by this account.
            for snapshot in ec2Resource.snapshots.filter(OwnerIds=['self']):
                if current_time - snapshot.start_time > max_age:
                    result.append({"region": reg, "snapshot_id": snapshot.id})
        except Exception:
            # Best-effort: skip regions that are disabled or not accessible.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_filter_public_s3_buckets_by_acl/README.md
================================================
# Get AWS public S3 Buckets using ACL
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_public_s3_buckets_by_acl/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_public_s3_buckets_by_acl/aws_filter_public_s3_buckets_by_acl.json
================================================
{
"action_title": "Get AWS public S3 Buckets using ACL",
"action_description": "Get AWS public S3 Buckets using ACL",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_public_s3_buckets_by_acl",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_is_check":true,
"action_verbs": ["filter"],
"action_nouns": ["aws","s3","public","buckets","by","acl"],
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3" ],
"action_next_hop":["750987144b20d7b5984a37e58c2e17b69fd33f799a1f027f0ff7532cee5913c6"],
"action_next_hop_parameter_mapping":{"750987144b20d7b5984a37e58c2e17b69fd33f799a1f027f0ff7532cee5913c6": {"name": "Restrict S3 Buckets with READ/WRITE Permissions to all Authenticated Users", "region": ".[0].region", "bucket_names":"map(.bucket)"}}
}
================================================
FILE: AWS/legos/aws_filter_public_s3_buckets_by_acl/aws_filter_public_s3_buckets_by_acl.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.legos.aws.aws_get_s3_buckets.aws_get_s3_buckets import aws_get_s3_buckets
from unskript.enums.aws_acl_permissions_enums import BucketACLPermissions
class InputSchema(BaseModel):
    # Optional region; an empty string makes the action scan every region.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='Name of the AWS Region'
    )
    # ACL permission to test for public access (e.g. READ, WRITE).
    permission: Optional[BucketACLPermissions] = Field(
        default=BucketACLPermissions.READ,
        title="S3 Bucket's ACL Permission",
        description="Set of permissions that AWS S3 supports in an ACL for buckets and objects"
    )
def aws_filter_public_s3_buckets_by_acl_printer(output):
    """Pretty-print the action result; a None output (no findings) prints nothing."""
    if output is not None:
        pprint.pprint(output)
def check_publicly_accessible_buckets(s3Client, b, all_permissions):
    """Return True when bucket `b` grants any permission in `all_permissions`
    to one of the public AWS grantee groups (AllUsers / AuthenticatedUsers).

    ACL lookup failures are treated as "not public" (best-effort check).
    """
    PUBLIC_GROUP_URIS = (
        "http://acs.amazonaws.com/groups/global/AuthenticatedUsers",
        "http://acs.amazonaws.com/groups/global/AllUsers",
    )
    is_public = False
    try:
        acl = s3Client.get_bucket_acl(Bucket=b)
        for wanted in all_permissions:
            for grant in acl["Grants"]:
                if grant.get("Permission") != wanted:
                    continue
                if grant["Grantee"].get("URI") in PUBLIC_GROUP_URIS:
                    is_public = True
    except Exception:
        # Unreadable ACLs are skipped rather than failing the whole scan.
        pass
    return is_public
def aws_filter_public_s3_buckets_by_acl(
    handle,
    permission:BucketACLPermissions=BucketACLPermissions.READ,
    region: str=None
    ) -> Tuple:
    """aws_filter_public_s3_buckets_by_acl get list of public buckets.
    Note- By default(if no permissions are given) READ and WRITE ACL Permissioned S3 buckets are
    checked for public access. Other ACL Permissions are - "READ_ACP"|"WRITE_ACP"|"FULL_CONTROL"

    :type handle: object
    :param handle: Object returned from task.validate(...)

    :type permission: Enum
    :param permission: Set of permissions that AWS S3 supports in an ACL for buckets and objects.

    :type region: string
    :param region: location of the bucket. When empty, every region is scanned.

    :rtype: Tuple of (status, list of public S3 buckets with the given ACL permissions);
            (True, None) when no public bucket is found.
    """
    # `not permission` covers both None and empty values without calling len()
    # on an enum member (which raises TypeError for non-sized enums).
    all_permissions = [permission]
    if not permission:
        all_permissions = ["READ", "WRITE"]
    result = []
    all_buckets = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle=handle)
    try:
        for r in all_regions:
            # Collect every bucket with the region it was listed from.
            output = aws_get_s3_buckets(handle=handle, region=r)
            for o in output or []:
                all_buckets.append({"region": r, "bucket": o})
    except Exception as e:
        raise e
    for bucket in all_buckets:
        s3Client = handle.client('s3', region_name=bucket['region'])
        if check_publicly_accessible_buckets(s3Client, bucket['bucket'], all_permissions):
            result.append(bucket)
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_filter_target_groups_by_tags/README.md
================================================
# Filter AWS Target groups by tag name
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_target_groups_by_tags/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_target_groups_by_tags/aws_filter_target_groups_by_tags.json
================================================
{
"action_title": "Filter AWS Target groups by tag name",
"action_description": "Filter AWS Target groups which have the provided tag attached to it. It also returns the value of that tag for each target group",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_target_groups_by_tags",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_VPC","CATEGORY_TYPE_AWS_ELB" ]
}
================================================
FILE: AWS/legos/aws_filter_target_groups_by_tags/aws_filter_target_groups_by_tags.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    # Tag key to look for on each target group.
    tag_key: str = Field(
        title='Tag name',
        description='Name of the tag to filter by.')
    # Region whose target groups are inspected.
    region: str = Field(
        title='Region',
        description='AWS Region.')
def aws_filter_target_groups_by_tags_printer(output):
    """Pretty-print the action result; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_filter_target_groups_by_tags(handle, tag_key: str, region: str) -> List:
    """aws_filter_target_groups_by_tags Returns a array of dict with target group and tag value.

    :type handle: object
    :param handle: Object containing global params for the notebook.

    :type tag_key: string
    :param tag_key: Name of the tag to filter target groups by.

    :type region: string
    :param region: AWS Region.

    :rtype: Returns a array of dict with target group ARN and tag value.
    """
    elbv2Client = handle.client('elbv2', region_name=region)
    tbs = aws_get_paginator(elbv2Client, "describe_target_groups", "TargetGroups")
    tbArnsList = []
    output = []
    count = 0
    tbsLength = len(tbs)
    for index, tb in enumerate(tbs):
        # Need to call describe_tags to get the tags associated with these TGs,
        # however that call can only take 20 TGs.
        tbArnsList.append(tb.get('TargetGroupArn'))
        count = count + 1
        # Flush the batch once it reaches 20 ARNs, or at the last target group.
        if count == 20 or index == tbsLength - 1:
            tagDescriptions = elbv2Client.describe_tags(ResourceArns=tbArnsList).get('TagDescriptions')
            # Check if the tag name exists in any of the TGs.
            for tagDescription in tagDescriptions:
                for tag in tagDescription.get('Tags'):
                    if tag.get('Key') == tag_key:
                        output.append({
                            "ResourceARN": tagDescription.get('ResourceArn'),
                            "TagValue": tag.get('Value')
                        })
                        # Stop scanning this TG's tags once the key is found.
                        break
            # Reset the batch for the next chunk of target groups.
            count = 0
            tbArnsList = []
    return output
================================================
FILE: AWS/legos/aws_filter_unencrypted_s3_buckets/README.md
================================================
# Filter AWS Unencrypted S3 Buckets
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_unencrypted_s3_buckets/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_unencrypted_s3_buckets/aws_filter_unencrypted_s3_buckets.json
================================================
{
"action_title": "Filter AWS Unencrypted S3 Buckets",
"action_description": "Filter AWS Unencrypted S3 Buckets",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_unencrypted_s3_buckets",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check":true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS","CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3" ],
"action_next_hop":["50d9c6abd7dce3ff9183d4135353e82859bc5a9639455b35bd229331be6048df"],
"action_next_hop_parameter_mapping":{"50d9c6abd7dce3ff9183d4135353e82859bc5a9639455b35bd229331be6048df": {"name": "Encrypt unencrypted S3 buckets","region": ".[0].region", "bucket_name":"map(.bucket)"}}
}
================================================
FILE: AWS/legos/aws_filter_unencrypted_s3_buckets/aws_filter_unencrypted_s3_buckets.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from botocore.exceptions import ClientError
class InputSchema(BaseModel):
    # Optional region; when empty, every region is scanned.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region.')
def aws_filter_unencrypted_s3_buckets_printer(output):
    """Pretty-print the action result; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_filter_unencrypted_s3_buckets(handle, region: str = "") -> Tuple:
    """aws_filter_unencrypted_s3_buckets lists S3 buckets without default encryption.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: Optional region; when empty, every region is scanned.

    :rtype: Tuple of (status, list of {"region", "bucket"} dicts);
            (True, None) when no unencrypted bucket is found.
    """
    result = []
    # list_buckets is account-wide, so the same buckets come back on every
    # region pass; track names already checked to report each bucket once.
    seen_buckets = set()
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            s3Client = handle.client('s3', region_name=reg)
            buckets = s3Client.list_buckets()['Buckets']
            for bucket in buckets:
                name = bucket['Name']
                if name in seen_buckets:
                    continue
                seen_buckets.add(name)
                try:
                    # Raises a ClientError (e.g. when no default server-side
                    # encryption configuration exists) for unencrypted buckets.
                    s3Client.get_bucket_encryption(Bucket=name)
                except ClientError:
                    result.append({"region": reg, "bucket": name})
        except Exception:
            # Best-effort: skip regions that are disabled or not accessible.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_filter_unhealthy_instances_from_asg/README.md
================================================
# Get Unhealthy instances from ASG
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_unhealthy_instances_from_asg/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_unhealthy_instances_from_asg/aws_filter_unhealthy_instances_from_asg.json
================================================
{
"action_title": "Get Unhealthy instances from ASG",
"action_description": "Get Unhealthy instances from Auto Scaling Group",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_unhealthy_instances_from_asg",
"action_needs_credential": true,
"action_is_check": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ASG","CATEGORY_TYPE_AWS_EC2" ],
"action_next_hop": ["680ad9d119afab5f647e1afe7826b88d89bf35304954c3328e65a2fcf470f930"],
"action_next_hop_parameter_mapping": {"680ad9d119afab5f647e1afe7826b88d89bf35304954c3328e65a2fcf470f930": {"name": "AWS Detach EC2 Instance from ASG", "region": ".[0].region", "instance_ids":"map(.InstanceId)"}}
}
================================================
FILE: AWS/legos/aws_filter_unhealthy_instances_from_asg/aws_filter_unhealthy_instances_from_asg.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    # Optional region of the ASG; when empty, every region is scanned.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region of the ASG.')
def aws_filter_unhealthy_instances_from_asg_printer(output):
    """Pretty-print the action result; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_filter_unhealthy_instances_from_asg(handle, region: str = "") -> Tuple:
    """aws_filter_unhealthy_instances_from_asg lists ASG instances reporting Unhealthy.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS region; when empty, every region is scanned.

    :rtype: Tuple of (status, list of unhealthy instances from ASG);
            (True, None) when none are found.
    """
    unhealthy = []
    regions_to_scan = [region] if region else aws_list_all_regions(handle)
    for current_region in regions_to_scan:
        try:
            asg_client = handle.client('autoscaling', region_name=current_region)
            instances = aws_get_paginator(
                asg_client,
                "describe_auto_scaling_instances",
                "AutoScalingInstances"
            )
            # Keep only instances whose health status is "Unhealthy".
            unhealthy.extend(
                {
                    "InstanceId": inst["InstanceId"],
                    "AutoScalingGroupName": inst["AutoScalingGroupName"],
                    "region": current_region,
                }
                for inst in instances
                if inst['HealthStatus'] == 'Unhealthy'
            )
        except Exception:
            # Best-effort: skip regions that are disabled or not accessible.
            pass
    if unhealthy:
        return (False, unhealthy)
    return (True, None)
================================================
FILE: AWS/legos/aws_filter_untagged_ec2_instances/README.md
================================================
# Filter AWS Untagged EC2 Instances
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_untagged_ec2_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_untagged_ec2_instances/aws_filter_untagged_ec2_instances.json
================================================
{
"action_title": "Filter AWS Untagged EC2 Instances",
"action_description": "Filter AWS Untagged EC2 Instances",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_untagged_ec2_instances",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["filter"],
"action_nouns": ["aws","instances","untagged"],
"action_is_check": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_COST_OPT" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2"],
"action_next_hop": ["a16703da15d9e9e2d8a56b146e730b5e4c1496721ff1dc8606a5021d521ed9e3"],
"action_next_hop_parameter_mapping": {"a16703da15d9e9e2d8a56b146e730b5e4c1496721ff1dc8606a5021d521ed9e3": {"name": "Stop all Untagged AWS EC2 Instances", "region": ".[0].region", "instance_ids":"map(.instanceID)"}}
}
================================================
FILE: AWS/legos/aws_filter_untagged_ec2_instances/aws_filter_untagged_ec2_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    # Optional region; when empty, every region is scanned.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='Name of the AWS Region'
    )
def aws_filter_untagged_ec2_instances_printer(output):
    """Pretty-print the action result; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def check_untagged_instance(res, r):
    """Collect the instances in reservations `res` that carry no tags.

    Returns a list of {"region": r, "instanceID": <id>} dicts, one per
    instance whose 'Tags' key is absent (or explicitly None).
    """
    untagged = []
    for reservation in res:
        for inst in reservation['Instances']:
            if inst.get('Tags', None) is None:
                untagged.append({'region': r, 'instanceID': inst['InstanceId']})
    return untagged
def aws_filter_untagged_ec2_instances(handle, region: str= None) -> Tuple:
    """aws_filter_untagged_ec2_instances lists the EC2 instances that have no tags.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: str
    :param region: Region to filter instances; when empty, every region is scanned.

    :rtype: Tuple of status, and list of untagged EC2 Instances
    """
    # Reject a missing handle or a region name that is not a real AWS region.
    if not handle or (region and region not in aws_list_all_regions(handle)):
        raise ValueError("Invalid input parameters provided.")
    untagged = []
    regions_to_scan = [region] if region else aws_list_all_regions(handle=handle)
    for current_region in regions_to_scan:
        try:
            client = handle.client('ec2', region_name=current_region)
            reservations = aws_get_paginator(client, "describe_instances", "Reservations")
            untagged += check_untagged_instance(reservations, current_region)
        except Exception:
            # Best-effort: skip regions that are disabled or not accessible.
            pass
    return (False, untagged) if untagged else (True, None)
================================================
FILE: AWS/legos/aws_filter_unused_keypairs/README.md
================================================
# Filter AWS Unused Keypairs
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_unused_keypairs/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_unused_keypairs/aws_filter_unused_keypairs.json
================================================
{
"action_title": "Filter AWS Unused Keypairs",
"action_description": "Filter AWS Unused Keypairs",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_unused_keypairs",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_is_check": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ],
"action_next_hop":["a28edafac5f3bac3ca34d677d9b01a4bc6f74893e50bc103e5cefb00e0f48746"],
"action_next_hop_parameter_mapping":{}
}
================================================
FILE: AWS/legos/aws_filter_unused_keypairs/aws_filter_unused_keypairs.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    # Optional region; when empty, every region is scanned.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='Name of the AWS Region')
def aws_filter_unused_keypairs_printer(output):
    """Pretty-print the action result; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_filter_unused_keypairs(handle, region: str = None) -> Tuple:
    """aws_filter_unused_keypairs returns EC2 key pairs not used by any instance.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: Optional AWS region; when empty, every region is scanned.

    :rtype: Tuple of (status, list of {"region", "unused_keys"} dicts);
            (True, None) when every key pair is in use.
    """
    result = []
    all_regions = [region]
    if region is None or len(region) == 0:
        all_regions = aws_list_all_regions(handle=handle)
    for r in all_regions:
        try:
            ec2Client = handle.client('ec2', region_name=r)
            all_key_names = [k['KeyName'] for k in ec2Client.describe_key_pairs()['KeyPairs']]
            # Key pairs are region-scoped, so the set of used key names must be
            # rebuilt per region instead of accumulating across regions (the
            # old accumulation hid unused keys that shared a name with a key
            # used in a previously scanned region).
            used_key_names = set()
            reservations = aws_get_paginator(ec2Client, "describe_instances", "Reservations")
            for reservation in reservations:
                for instance in reservation['Instances']:
                    if 'KeyName' in instance:
                        used_key_names.add(instance['KeyName'])
            unused = [k for k in all_key_names if k not in used_key_names]
            if unused:
                result.append({"region": r, "unused_keys": unused})
        except Exception:
            # Best-effort: skip regions that are disabled or not accessible.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_filter_unused_log_streams/README.md
================================================
# AWS Filter Unused Log Stream
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_filter_unused_log_streams/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_unused_log_streams/aws_filter_unused_log_streams.json
================================================
{
"action_title": "AWS Filter Unused Log Stream",
"action_description": "This action lists, for every log group, the log streams that have been unused for longer than the given threshold.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_unused_log_streams",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_is_check": true,
"action_next_hop":["64b6e7809ddfb1094901da74924ca3386510a1cd"],
"action_next_hop_parameter_mapping":{"64b6e7809ddfb1094901da74924ca3386510a1cd": {"name":"Delete Unused AWS Log Streams", "region": ".[0].region", "log_stream_name": "map(.log_stream_name)", "log_group_name":".[0].log_group_name"}},
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_LOGS" ]
}
================================================
FILE: AWS/legos/aws_filter_unused_log_streams/aws_filter_unused_log_streams.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import botocore.config
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    # Age threshold in days used to decide that a log stream is unused.
    time_period_in_days: Optional[int] = Field(
        default=30,
        title="Threshold (in days)",
        description="(in days) The threshold to filter the unused log streams.")
    # Optional AWS region; when empty, the action scans every region.
    region: Optional[str] = Field(
        title='Region',
        description='AWS Region')
def aws_filter_unused_log_streams_printer(output):
    """Pretty-print the action result; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_filter_unused_log_streams(handle, region: str = "", time_period_in_days: int = 30) -> Tuple:
    """aws_filter_unused_log_streams returns unused log streams for all log groups.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: Optional region; when empty, every region is scanned.

    :type time_period_in_days: int
    :param time_period_in_days: (in days) The threshold to filter the unused log streams.

    :rtype: Tuple of (status, list of unused log streams for all log groups).
    """
    result = []
    now = datetime.utcnow()
    # Hoisted out of the loop: the cutoff is the same for every region.
    start_time = now - timedelta(days=time_period_in_days)
    config = botocore.config.Config(retries={'max_attempts': 10})
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            logsClient = handle.client('logs', region_name=reg, config=config)
            log_groups = aws_get_paginator(logsClient, "describe_log_groups", "logGroups")
            for log_group in log_groups:
                log_group_name = log_group['logGroupName']
                log_streams = aws_get_paginator(logsClient, "describe_log_streams", "logStreams",
                                                logGroupName=log_group_name,
                                                orderBy='LastEventTime',
                                                descending=True)
                for log_stream in log_streams:
                    last_event_time = log_stream.get('lastEventTimestamp')
                    # A stream is unused when it never logged an event, or its
                    # last event (epoch milliseconds) is older than the cutoff.
                    # utcfromtimestamp keeps the comparison in UTC to match
                    # start_time; fromtimestamp would shift by the local
                    # timezone offset.
                    if last_event_time is None or \
                            datetime.utcfromtimestamp(last_event_time / 1000.0) < start_time:
                        result.append({
                            "log_group_name": log_group_name,
                            "log_stream_name": log_stream['logStreamName'],
                            "region": reg,
                        })
        except Exception:
            # Best-effort: skip regions that are disabled or not accessible.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_filter_unused_nat_gateway/README.md
================================================
# AWS Find Unused NAT Gateways
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://unskript.com)
================================================
FILE: AWS/legos/aws_filter_unused_nat_gateway/__init__.py
================================================
================================================
FILE: AWS/legos/aws_filter_unused_nat_gateway/aws_filter_unused_nat_gateway.json
================================================
{
"action_title": "AWS Find Unused NAT Gateways",
"action_description": "This action gets all NAT gateways that have had zero traffic over the look-back period",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_filter_unused_nat_gateway",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_is_check": true,
"action_next_hop":["f2b1eecf9b4f727ec80fc4d4f5c7915b788cafe969552af0a26f8db9747bbcd4"],
"action_next_hop_parameter_mapping":{"f2b1eecf9b4f727ec80fc4d4f5c7915b788cafe969552af0a26f8db9747bbcd4": {"name": "Delete Unused NAT Gateways","region":".[0].region","nat_gateway_ids":"map(.nat_gateway_id)"}},
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_NAT_GATEWAY","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_filter_unused_nat_gateway/aws_filter_unused_nat_gateway.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    # Optional region; when empty, every region is scanned.
    region: Optional[str] = Field(
        title='Region',
        description='AWS Region.')
    # Look-back window (days) for the CloudWatch datapoint query.
    number_of_days: Optional[int] = Field(
        title="Number of Days",
        description='Number of days to check the Datapoints.')
def aws_filter_unused_nat_gateway_printer(output):
    """Pretty-print the action result; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def is_nat_gateway_used(handle, nat_gateway, start_time, end_time, number_of_days):
    """Return True when the NAT gateway shows a non-zero ActiveConnectionCount sum.

    `handle` is a CloudWatch client. Deleted gateways and metric-fetch
    failures are both reported as unused (False).
    """
    if nat_gateway['State'] == 'deleted':
        return False
    try:
        # Period spans the whole window, so at most one datapoint comes back.
        stats = handle.get_metric_statistics(
            Namespace='AWS/NATGateway',
            MetricName='ActiveConnectionCount',
            Dimensions=[
                {
                    'Name': 'NatGatewayId',
                    'Value': nat_gateway['NatGatewayId']
                },
            ],
            StartTime=start_time,
            EndTime=end_time,
            Period=86400 * number_of_days,
            Statistics=['Sum']
        )
    except Exception as e:
        print(f"An error occurred while fetching metrics data for {nat_gateway['NatGatewayId']}: {e}")
        return False
    points = stats.get('Datapoints', [])
    return bool(points) and points[0].get('Sum', 0) != 0
def aws_filter_unused_nat_gateway(handle, number_of_days: int = 7, region: str = "") -> Tuple:
    """aws_filter_unused_nat_gateway returns NAT gateways with no recent traffic.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type number_of_days: int
    :param number_of_days: Number of days to check the Datapoints.

    :type region: string
    :param region: Region to filter NAT Gateways; when empty, every region is scanned.

    :rtype: Tuple of (status, list of {"nat_gateway_id", "region"} dicts).
    """
    # Reject a missing handle or a region name that is not a real AWS region.
    if not handle or (region and region not in aws_list_all_regions(handle)):
        raise ValueError("Invalid input parameters provided.")
    unused = []
    end_time = datetime.utcnow()
    start_time = end_time - timedelta(days=number_of_days)
    regions_to_scan = [region] if region else aws_list_all_regions(handle)
    for current_region in regions_to_scan:
        try:
            ec2 = handle.client('ec2', region_name=current_region)
            cloudwatch = handle.client('cloudwatch', region_name=current_region)
            for gateway in ec2.describe_nat_gateways().get('NatGateways', []):
                if is_nat_gateway_used(cloudwatch, gateway, start_time, end_time, number_of_days):
                    continue
                unused.append({
                    "nat_gateway_id": gateway['NatGatewayId'],
                    "region": current_region,
                })
        except Exception:
            # Best-effort: skip regions that are disabled or not accessible.
            pass
    return (False, unused) if unused else (True, None)
================================================
FILE: AWS/legos/aws_find_elbs_with_no_targets_or_instances/README.md
================================================
# Find AWS ELBs with no targets or instances
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_find_elbs_with_no_targets_or_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_find_elbs_with_no_targets_or_instances/aws_find_elbs_with_no_targets_or_instances.json
================================================
{
"action_title": "Find AWS ELBs with no targets or instances",
"action_description": "Find AWS ELBs with no targets or instances attached to them.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_find_elbs_with_no_targets_or_instances",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_COST_OPT" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELB"],
"action_supports_poll": true,
"action_next_hop": ["2aba76792cb2802cae55deb60d28820522aeba93865572a1e9c7ddc5309e1312"],
"action_next_hop_parameter_mapping": {"2aba76792cb2802cae55deb60d28820522aeba93865572a1e9c7ddc5309e1312": {"name": "Delete AWS ELBs With No Targets Or Instances", "region":".[0].region","elb_arns":"map(.elb_arn)","elb_names":"map(.elb_name)" }}
}
================================================
FILE: AWS/legos/aws_find_elbs_with_no_targets_or_instances/aws_find_elbs_with_no_targets_or_instances.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.connectors.aws import aws_get_paginator
import pprint
class InputSchema(BaseModel):
    # AWS region to scan; an empty string means "scan every region".
    region: Optional[str] = Field('', description='AWS Region.', title='region')
def aws_find_elbs_with_no_targets_or_instances_printer(output):
    """Pretty-print the check result; silently ignores a None output."""
    if output is not None:
        pprint.pprint(output)
def aws_find_elbs_with_no_targets_or_instances(handle, region: str = "") -> Tuple:
    """aws_find_elbs_with_no_targets_or_instances Returns details of ELBs with no target groups or instances.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: str
    :param region: AWS Region. When empty, every region is scanned.

    :rtype: Tuple of status, and details of ELB's with no targets or instances
    """
    result = []
    all_load_balancers = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            elbv2Client = handle.client('elbv2', region_name=reg)
            elbv2_response = aws_get_paginator(elbv2Client, "describe_load_balancers", "LoadBalancers")
            elbClient = handle.client('elb', region_name=reg)
            elb_response = elbClient.describe_load_balancers()
            # v2 load balancers (application/network/gateway) carry an ARN.
            for lb in elbv2_response:
                all_load_balancers.append({
                    "load_balancer_name": lb['LoadBalancerName'],
                    "load_balancer_arn": lb['LoadBalancerArn'],
                    "load_balancer_type": lb['Type'],
                    "load_balancer_dns": lb['DNSName'],
                    "region": reg,
                })
            # Classic load balancers are listed without an ARN.
            for lb in elb_response['LoadBalancerDescriptions']:
                all_load_balancers.append({
                    "load_balancer_name": lb['LoadBalancerName'],
                    "load_balancer_type": 'classic',
                    "load_balancer_dns": lb['DNSName'],
                    "region": reg,
                })
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    for load_balancer in all_load_balancers:
        try:
            if "load_balancer_arn" in load_balancer:
                # v2 LB (including 'gateway' type, which previously fell into the
                # classic branch and errored): flag it when it has no target groups.
                elbv2Client = handle.client('elbv2', region_name=load_balancer['region'])
                target_groups = elbv2Client.describe_target_groups(
                    LoadBalancerArn=load_balancer['load_balancer_arn']
                )
                if len(target_groups['TargetGroups']) == 0:
                    result.append({
                        "elb_arn": load_balancer['load_balancer_arn'],
                        "elb_name": load_balancer['load_balancer_name'],
                        "region": load_balancer['region'],
                        "type": load_balancer['load_balancer_type'],
                    })
            else:
                # Classic LB: flag it when no instances are registered.
                elbClient = handle.client('elb', region_name=load_balancer['region'])
                res = elbClient.describe_instance_health(
                    LoadBalancerName=load_balancer['load_balancer_name'],
                )
                if len(res['InstanceStates']) == 0:
                    result.append({
                        "elb_name": load_balancer['load_balancer_name'],
                        "region": load_balancer['region'],
                        "type": load_balancer['load_balancer_type'],
                    })
        except Exception:
            # One broken LB must not abort the whole scan (the original second
            # loop had no error handling and raised on the first API failure).
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_find_idle_instances/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_find_idle_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_find_idle_instances/aws_find_idle_instances.json
================================================
{
"action_title": "AWS Find Idle Instances",
"action_description": "Find Idle EC2 instances",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_find_idle_instances",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": ["c03babff32b83949e6ca20a49901d42a5a74ed3036de4609096390c9f6d0851a"],
"action_next_hop_parameter_mapping": {"c03babff32b83949e6ca20a49901d42a5a74ed3036de4609096390c9f6d0851a": {"name": "Stop Idle EC2 Instances", "region": ".[0].region", "instance_ids":"map(.instance)"}},
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2"]
}
================================================
FILE: AWS/legos/aws_find_idle_instances/aws_find_idle_instances.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, Tuple
import datetime
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    # Average CPU (percent) below which a running instance counts as idle.
    idle_cpu_threshold: Optional[int] = Field(
        default=5,
        description='Idle CPU threshold (in percent)',
        title='Idle CPU Threshold'
    )
    # Look-back window (hours) over which CPU utilization is averaged.
    idle_duration: Optional[int] = Field(
        default=6,
        description='Idle duration (in hours)',
        title='Idle Duration'
    )
    # AWS region to scan; an empty string means "scan every region".
    region: Optional[str] = Field(
        default='',
        description='AWS Region to get the instances from. Eg: "us-west-2"',
        title='Region',
    )
def aws_find_idle_instances_printer(output):
    """Pretty-print the check result; silently ignores a None output."""
    if output is not None:
        pprint.pprint(output)
def is_instance_idle(instance_id, idle_cpu_threshold, idle_duration, cloudwatchclient):
    """Return True when the instance's average CPU over the look-back window is below the threshold.

    :type instance_id: str
    :param instance_id: EC2 instance ID whose CPUUtilization metric is inspected.

    :type idle_cpu_threshold: int
    :param idle_cpu_threshold: Idle CPU threshold, in percent.

    :type idle_duration: int
    :param idle_duration: Look-back window, in hours.

    :type cloudwatchclient: object
    :param cloudwatchclient: CloudWatch client used to fetch the metric statistics.

    :rtype: bool
    """
    # The original wrapped this in a try/except that only re-raised; removed.
    now = datetime.datetime.utcnow()
    start_time = now - datetime.timedelta(hours=idle_duration)
    cpu_utilization_stats = cloudwatchclient.get_metric_statistics(
        Namespace="AWS/EC2",
        MetricName="CPUUtilization",
        Dimensions=[{"Name": "InstanceId", "Value": instance_id}],
        StartTime=start_time.isoformat(),
        EndTime=now.isoformat(),
        Period=3600,
        Statistics=["Average"],
    )
    datapoints = cpu_utilization_stats["Datapoints"]
    if not datapoints:
        # No metric data: treat the instance as not idle rather than guessing.
        return False
    average_cpu = sum(dp["Average"] for dp in datapoints) / len(datapoints)
    return average_cpu < idle_cpu_threshold
def aws_find_idle_instances(
        handle,
        idle_cpu_threshold: int = 5,
        idle_duration: int = 6,
        region: str = ''
    ) -> Tuple:
    """aws_find_idle_instances finds idle EC2 instances.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type idle_cpu_threshold: int
    :param idle_cpu_threshold: (in percent) Idle CPU threshold.

    :type idle_duration: int
    :param idle_duration: (in hours) Look-back window for CPU utilization.

    :type region: string
    :param region: AWS Region to get the instances from. Eg: "us-west-2"

    :rtype: Tuple with status result and list of Idle Instances.
    """
    result = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            ec2client = handle.client('ec2', region_name=reg)
            cloudwatchclient = handle.client("cloudwatch", region_name=reg)
            # Paginate: a single describe_instances call returns only one page,
            # so large accounts would silently miss instances.
            for page in ec2client.get_paginator('describe_instances').paginate():
                for reservation in page['Reservations']:
                    for i in reservation['Instances']:
                        if i['State']["Name"] == "running" and is_instance_idle(
                            i['InstanceId'],
                            idle_cpu_threshold,
                            idle_duration,
                            cloudwatchclient
                        ):
                            result.append({"instance": i['InstanceId'], "region": reg})
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_find_long_running_lambdas/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_find_long_running_lambdas/__init__.py
================================================
================================================
FILE: AWS/legos/aws_find_long_running_lambdas/aws_find_long_running_lambdas.json
================================================
{
"action_title": "AWS Filter Lambdas with Long Runtime",
"action_description": "This action retrieves a list of all Lambda functions and searches for log events for each function for given runtime(duration).",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_find_long_running_lambdas",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_is_check": true,
"action_next_hop": [],
"action_next_hop_parameter_mapping": {},
"action_categories": [ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_find_long_running_lambdas/aws_find_long_running_lambdas.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Tuple, Optional
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
import pprint
import datetime
class InputSchema(BaseModel):
    # How many days of logs to search backwards from now.
    days_back: Optional[int] = Field(
        default=7,
        title="Days to Search",
        description="(In days) An integer specifying the number of days to search back for logs.")
    # Runtime (ms) at or above which a Lambda invocation is reported.
    duration_threshold: Optional[int] = Field(
        default=500,
        title="Minimum Duration of a Lambda Function",
        description="(In milliseconds) specifying the threshold for the minimum runtime of a Lambda function.")
    # AWS region to scan; when unset, every region is scanned.
    region: Optional[str] = Field(
        title='Region',
        description='AWS Region')
def aws_find_long_running_lambdas_printer(output):
    """Pretty-print the check result; silently ignores a None output."""
    if output is not None:
        pprint.pprint(output)
def aws_find_long_running_lambdas(handle, days_back: int = 7, duration_threshold: int = 500, region: str = "") -> Tuple:
    """aws_find_long_running_lambdas Returns a list of long running lambdas.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type days_back: int
    :param days_back: (In days) An integer specifying the number of days to search back for logs.

    :type duration_threshold: int
    :param duration_threshold: (In milliseconds) specifying the threshold for the minimum runtime of a Lambda function.

    :type region: str
    :param region: AWS Region. When empty, every region is scanned.

    :rtype: Tuple of status and list of long running lambdas.
    """
    result = []
    start_time = datetime.datetime.now() - datetime.timedelta(days=days_back)
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            lambda_client = handle.client('lambda', region_name=reg)
            log_client = handle.client('logs', region_name=reg)
            response = aws_get_paginator(lambda_client, "list_functions", "Functions")
            for function in response:
                function_name = function['FunctionName']
                log_group_name = f"/aws/lambda/{function_name}"
                try:
                    # Search the function's log group for invocation REPORT lines.
                    log_response = aws_get_paginator(log_client, "filter_log_events", "events",
                                                     logGroupName=log_group_name,
                                                     startTime=int(start_time.timestamp() * 1000))
                    for event in log_response:
                        if 'REPORT' not in event['message']:
                            continue
                        # REPORT lines are tab separated, e.g.
                        # "REPORT RequestId: ...\tDuration: 102.25 ms\tBilled Duration: 103 ms\t..."
                        # The original looked for an exact 'Duration:' list element
                        # after the split, which never exists, so the ValueError was
                        # swallowed and nothing was ever reported.
                        for field in event['message'].split('\t'):
                            field = field.strip()
                            if field.startswith('Duration:'):
                                duration = float(field.split()[1])
                                if duration >= duration_threshold:
                                    result.append({'function_name': function_name,
                                                   'duration': duration,
                                                   'region': reg})
                                break
                except Exception:
                    # The log group may not exist for this function; skip it.
                    pass
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_find_low_connection_rds_instances/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_find_low_connection_rds_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_find_low_connection_rds_instances/aws_find_low_connection_rds_instances.json
================================================
{
"action_title": "AWS Find Low Connections RDS instances Per Day",
"action_description": "This action will find RDS DB instances with a number of connections below the specified minimum in the specified region.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_find_low_connection_rds_instances",
"action_needs_credential": true,
"action_is_check": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS" ],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_find_low_connection_rds_instances/aws_find_low_connection_rds_instances.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.connectors.aws import aws_get_paginator
import datetime
import pprint
class InputSchema(BaseModel):
    # AWS region to scan; an empty string means "scan every region".
    region: Optional[str] = Field(
        default='',
        title='Region for RDS',
        description='Region of the RDS.'
    )
    # Instances with fewer daily connections than this are reported.
    min_connections: Optional[int] = Field(
        default=10,
        title='Minimum Number of Connections',
        description='The minimum number of connections for an instance to be considered active.'
    )
def aws_find_low_connection_rds_instances_printer(output):
    """Pretty-print the check result; silently ignores a None output."""
    if output is not None:
        pprint.pprint(output)
def aws_find_low_connection_rds_instances(handle, min_connections: int = 10, region: str = "") -> Tuple:
    """aws_find_low_connection_rds_instances Gets RDS instances whose daily connection count is below a minimum.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type min_connections: int
    :param min_connections: The minimum number of connections for an instance to be considered active.

    :type region: string
    :param region: AWS Region. When empty, every region is scanned.

    :rtype: Tuple of status and a list containing information about RDS instances.
    """
    result = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            rds_Client = handle.client('rds', region_name=reg)
            cloudwatch = handle.client('cloudwatch', region_name=reg)
            response = aws_get_paginator(rds_Client, "describe_db_instances", "DBInstances")
            for db in response:
                db_instance_identifier = db['DBInstanceIdentifier']
                end_time = datetime.datetime.now()
                start_time = end_time - datetime.timedelta(days=1)
                response1 = cloudwatch.get_metric_statistics(
                    Namespace='AWS/RDS',
                    MetricName='DatabaseConnections',
                    Dimensions=[
                        {
                            'Name': 'DBInstanceIdentifier',
                            'Value': db_instance_identifier
                        }
                    ],
                    StartTime=start_time,
                    EndTime=end_time,
                    # One datapoint covering the full 24h window; the original
                    # value 86460 (24h + 60s) looks like a typo for 86400.
                    Period=86400,
                    Statistics=['Sum']
                )
                data_points = response1['Datapoints']
                if data_points:
                    connections = data_points[-1]['Sum']
                    if connections < min_connections:
                        result.append({
                            "region": reg,
                            "db_instance": db_instance_identifier,
                            "connections": int(connections),
                        })
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_find_old_gen_emr_clusters/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_find_old_gen_emr_clusters/__init__.py
================================================
================================================
FILE: AWS/legos/aws_find_old_gen_emr_clusters/aws_find_old_gen_emr_clusters.json
================================================
{
"action_title": "AWS Find EMR Clusters of Old Generation Instances",
"action_description": "This action list of EMR clusters of old generation instances.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_find_old_gen_emr_clusters",
"action_needs_credential": true,
"action_is_check": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS"],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_find_old_gen_emr_clusters/aws_find_old_gen_emr_clusters.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.connectors.aws import aws_get_paginator
import pprint
class InputSchema(BaseModel):
    # AWS region to scan; an empty string means "scan every region".
    region: Optional[str] = Field(
        default='',
        title='AWS Region',
        description='AWS Region.'
    )
def aws_find_old_gen_emr_clusters_printer(output):
    """Pretty-print the check result; silently ignores a None output."""
    if output is not None:
        pprint.pprint(output)
def aws_find_old_gen_emr_clusters(handle, region: str = "") -> Tuple:
    """aws_find_old_gen_emr_clusters Gets list of old generation EMR clusters.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region. When empty, every region is scanned.

    :rtype: Tuple with list of old generation EMR clusters.
    """
    old_gen_prefixes = ('m1.', 'c1.', 'cc1.', 'm2.', 'cr1.', 'cg1.', 't1.')
    regions_to_scan = [region] if region else aws_list_all_regions(handle)
    result = []
    for reg in regions_to_scan:
        try:
            emr_client = handle.client('emr', region_name=reg)
            clusters = aws_get_paginator(emr_client, "list_clusters", "Clusters")
            for cluster in clusters:
                groups = aws_get_paginator(emr_client, "list_instance_groups", "InstanceGroups",
                                           ClusterId=cluster['Id'])
                # Flag the cluster once when any of its instance groups uses an
                # old-generation instance type.
                if any(g['InstanceType'].startswith(old_gen_prefixes) for g in groups):
                    result.append({"cluster_id": cluster['Id'], "region": reg})
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_find_rds_instances_with_low_cpu_utilization/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_find_rds_instances_with_low_cpu_utilization/__init__.py
================================================
================================================
FILE: AWS/legos/aws_find_rds_instances_with_low_cpu_utilization/aws_find_rds_instances_with_low_cpu_utilization.json
================================================
{
"action_title": "AWS Find RDS Instances with low CPU Utilization",
"action_description": "This lego finds RDS instances are not utilizing their CPU resources to their full potential.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_find_rds_instances_with_low_cpu_utilization",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_next_hop":["655835b762ba634f02074a48e4bae12f7a3e29bb8e6776eb8d657ddbfe181a59"],
"action_next_hop_parameter_mapping":{"655835b762ba634f02074a48e4bae12f7a3e29bb8e6776eb8d657ddbfe181a59": {"name": "Delete RDS Instances with Low CPU Utilization", "region": ".[0].region", "db_identifiers":"map(.instance)"}},
"action_categories":[ "CATEGORY_TYPE_COST_OPT","CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS_RDS","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_find_rds_instances_with_low_cpu_utilization/aws_find_rds_instances_with_low_cpu_utilization.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.connectors.aws import aws_get_paginator
import pprint
from datetime import datetime,timedelta
class InputSchema(BaseModel):
    # AWS region to scan; an empty string means "scan every region".
    region: Optional[str] = Field(
        '', description='AWS Region to get the RDS Instance', title='AWS Region'
    )
    # Look-back window (minutes) used to compute the metric start time.
    duration_minutes: Optional[int] = Field(
        5,
        description='Value in minutes to get the start time of the metrics for CPU Utilization',
        title='Duration of Start time',
    )
    # Instances whose CPU utilization is below this percentage are reported.
    utilization_threshold: Optional[int] = Field(
        10,
        description='The threshold percentage of CPU utilization for an RDS Instance.',
        title='CPU Utilization Threshold',
    )
def aws_find_rds_instances_with_low_cpu_utilization_printer(output):
    """Pretty-print the check result; silently ignores a None output."""
    if output is not None:
        pprint.pprint(output)
def aws_find_rds_instances_with_low_cpu_utilization(handle, utilization_threshold: int = 10, region: str = "", duration_minutes: int = 5) -> Tuple:
    """aws_find_rds_instances_with_low_cpu_utilization finds RDS instances that have a lower CPU utilization than the given threshold.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type utilization_threshold: integer
    :param utilization_threshold: The threshold percentage of CPU utilization for an RDS Instance.

    :type region: string
    :param region: Region of the RDS. When empty, every region is scanned.

    :type duration_minutes: integer
    :param duration_minutes: Value in minutes to get the start time of the metrics for CPU Utilization.

    :rtype: status, list of instances and their region.
    """
    if not handle or utilization_threshold < 0 or utilization_threshold > 100 or duration_minutes <= 0:
        raise ValueError("Invalid input parameters provided.")
    result = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            rdsClient = handle.client('rds', region_name=reg)
            cloudwatchClient = handle.client('cloudwatch', region_name=reg)
            all_instances = aws_get_paginator(rdsClient, "describe_db_instances", "DBInstances")
            # Use one UTC clock for both ends of the window. The original mixed
            # datetime.now() (local) with utcnow(), skewing the window by the
            # local UTC offset.
            end_time = datetime.utcnow()
            start_time = end_time - timedelta(minutes=duration_minutes)
            for db in all_instances:
                response = cloudwatchClient.get_metric_data(
                    MetricDataQueries=[
                        {
                            'Id': 'cpu',
                            'MetricStat': {
                                'Metric': {
                                    'Namespace': 'AWS/RDS',
                                    'MetricName': 'CPUUtilization',
                                    'Dimensions': [
                                        {
                                            'Name': 'DBInstanceIdentifier',
                                            'Value': db['DBInstanceIdentifier']
                                        },
                                    ]
                                },
                                'Period': 60,
                                'Stat': 'Average',
                            },
                            'ReturnData': True,
                        },
                    ],
                    StartTime=start_time.isoformat(),
                    EndTime=end_time.isoformat(),
                )
                # Guard against an empty Values list: the original indexed [0]
                # unconditionally, and the resulting IndexError silently skipped
                # the rest of the region via the outer except.
                values = response['MetricDataResults'][0].get('Values', [])
                if values and values[0] < utilization_threshold:
                    result.append({
                        "region": reg,
                        "instance": db['DBInstanceIdentifier'],
                    })
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_find_redshift_cluster_without_pause_resume_enabled/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_find_redshift_cluster_without_pause_resume_enabled/__init__.py
================================================
================================================
FILE: AWS/legos/aws_find_redshift_cluster_without_pause_resume_enabled/aws_find_redshift_cluster_without_pause_resume_enabled.json
================================================
{
"action_title": "AWS Find Redshift Cluster without Pause Resume Enabled",
"action_description": "Use This Action to AWS find redshift cluster for which paused resume are not Enabled",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_find_redshift_cluster_without_pause_resume_enabled",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_is_check":true,
"action_next_hop":["8b9c4eadb5f2fb817be0952f3ecb28c8e490ece6281286a74a95d5fe25019400"],
"action_next_hop_parameter_mapping":{"8b9c4eadb5f2fb817be0952f3ecb28c8e490ece6281286a74a95d5fe25019400": {"name": "AWS Ensure Redshift Clusters have Paused Resume Enabled", "region": ".[0].region", "redshift_clusters":"map(.cluster_name)"}},
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2","CATEGORY_TYPE_DB" ]
}
================================================
FILE: AWS/legos/aws_find_redshift_cluster_without_pause_resume_enabled/aws_find_redshift_cluster_without_pause_resume_enabled.py
================================================
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.connectors.aws import aws_get_paginator
import pprint
class InputSchema(BaseModel):
    # AWS region to scan; an empty string means "scan every region".
    region: Optional[str] = Field(
        default='',
        title='Region',
        description='AWS Region.')
def aws_find_redshift_cluster_without_pause_resume_enabled_printer(output):
    """Pretty-print the check result; silently ignores a None output."""
    if output is not None:
        pprint.pprint(output)
def aws_find_redshift_cluster_without_pause_resume_enabled(handle, region: str = "") -> Tuple:
    """aws_find_redshift_cluster_without_pause_resume_enabled Gets all redshift clusters that have no pause/resume scheduled action.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region. When empty, every region is scanned.

    :rtype: Tuple with the status result and a list of all redshift clusters that don't have pause and resume enabled.
    """
    result = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            redshift_Client = handle.client('redshift', region_name=reg)
            response = aws_get_paginator(redshift_Client, "describe_clusters", "Clusters")
            for cluster in response:
                cluster_name = cluster["ClusterIdentifier"]
                schedule_actions = aws_get_paginator(
                    redshift_Client, "describe_scheduled_actions", "ScheduledActions",
                    Filters=[{'Name': 'cluster-identifier', 'Values': [cluster_name]}])
                # A cluster is compliant when at least one of its scheduled
                # actions pauses or resumes it. The original appended the
                # cluster once per unrelated action, producing duplicates and
                # flagging clusters that actually had pause/resume configured.
                has_pause_resume = any(
                    "ResumeCluster" in action["TargetAction"] or "PauseCluster" in action["TargetAction"]
                    for action in schedule_actions
                )
                if not has_pause_resume:
                    result.append({"cluster_name": cluster_name, "region": reg})
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_find_redshift_clusters_with_low_cpu_utilization/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_find_redshift_clusters_with_low_cpu_utilization/__init__.py
================================================
================================================
FILE: AWS/legos/aws_find_redshift_clusters_with_low_cpu_utilization/aws_find_redshift_clusters_with_low_cpu_utilization.json
================================================
{
"action_title": "AWS Find Redshift Clusters with low CPU Utilization",
"action_description": "Find underutilized Redshift clusters in terms of CPU utilization.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_find_redshift_clusters_with_low_cpu_utilization",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_next_hop":["2a51c98c5c99d132011e285546e365402351fd3d09214041aea7592367bd48bf"],
"action_next_hop_parameter_mapping":{"2a51c98c5c99d132011e285546e365402351fd3d09214041aea7592367bd48bf": {"name": "Delete Redshift Clusters with Low CPU Utilization", "region": ".[0].region", "cluster_identifiers":"map(.cluster)"}},
"action_categories":["CATEGORY_TYPE_COST_OPT" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_REDSHIFT","CATEGORY_TYPE_AWS_CLOUDWATCH"]
}
================================================
FILE: AWS/legos/aws_find_redshift_clusters_with_low_cpu_utilization/aws_find_redshift_clusters_with_low_cpu_utilization.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
import pprint
from datetime import datetime,timedelta
class InputSchema(BaseModel):
    # AWS region to scan; an empty string means "scan every region".
    region: Optional[str] = Field(
        '', description='AWS Region to get the Redshift Cluster', title='AWS Region'
    )
    # Look-back window (minutes) used to compute the metric start time.
    duration_minutes: Optional[int] = Field(
        5,
        description='Value in minutes to determine the start time of the data points. ',
        title='Duration (in minutes)',
    )
    # Clusters whose CPU utilization is below this percentage are reported.
    utilization_threshold: Optional[int] = Field(
        10,
        description='The threshold value in percent of CPU utilization of the Redshift cluster',
        title='CPU utilization threshold(in %)',
    )
def aws_find_redshift_clusters_with_low_cpu_utilization_printer(output):
    """Pretty-print the check result; silently ignores a None output."""
    if output is not None:
        pprint.pprint(output)
def aws_find_redshift_clusters_with_low_cpu_utilization(handle, utilization_threshold: int = 10, region: str = "", duration_minutes: int = 5) -> Tuple:
    """aws_find_redshift_clusters_with_low_cpu_utilization finds Redshift Clusters that have a lower CPU utilization than the given threshold.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type utilization_threshold: integer
    :param utilization_threshold: The threshold percentage of CPU utilization for a Redshift Cluster.

    :type region: string
    :param region: Region of the Cluster. When empty, every region is scanned.

    :type duration_minutes: integer
    :param duration_minutes: Look-back window, in minutes, for the CPU datapoints.

    :rtype: status, list of clusters and their region.
    """
    result = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            redshiftClient = handle.client('redshift', region_name=reg)
            cloudwatchClient = handle.client('cloudwatch', region_name=reg)
            # Paginate: a single describe_clusters call caps out at 100
            # clusters, so larger accounts would be partially scanned.
            for page in redshiftClient.get_paginator('describe_clusters').paginate():
                for cluster in page['Clusters']:
                    cluster_identifier = cluster['ClusterIdentifier']
                    response = cloudwatchClient.get_metric_statistics(
                        Namespace='AWS/Redshift',
                        MetricName='CPUUtilization',
                        Dimensions=[
                            {
                                'Name': 'ClusterIdentifier',
                                'Value': cluster_identifier
                            }
                        ],
                        StartTime=(datetime.utcnow() - timedelta(minutes=duration_minutes)).isoformat(),
                        EndTime=datetime.utcnow().isoformat(),
                        Period=60,
                        Statistics=['Average']
                    )
                    datapoints = response['Datapoints']
                    if datapoints and datapoints[-1]['Average'] < utilization_threshold:
                        result.append({"region": reg, "cluster": cluster_identifier})
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_find_s3_buckets_without_lifecycle_policies/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_find_s3_buckets_without_lifecycle_policies/__init__.py
================================================
================================================
FILE: AWS/legos/aws_find_s3_buckets_without_lifecycle_policies/aws_find_s3_buckets_without_lifecycle_policies.json
================================================
{
"action_title": "AWS Find S3 Buckets without Lifecycle Policies",
"action_description": "S3 lifecycle policies enable you to automatically transition objects to different storage classes or delete them when they are no longer needed. This action finds all S3 buckets without lifecycle policies. ",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_find_s3_buckets_without_lifecycle_policies",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_next_hop":["3d74913836e037a001f718b48f1e19010394b90afc2422d0572ab5c515521075"],
"action_next_hop_parameter_mapping":{"3d74913836e037a001f718b48f1e19010394b90afc2422d0572ab5c515521075": {"name": "Add Lifecycle Policy to S3 Buckets", "region": ".[0].region", "bucket_names":"map(.bucket_name)"}},
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3"]
}
================================================
FILE: AWS/legos/aws_find_s3_buckets_without_lifecycle_policies/aws_find_s3_buckets_without_lifecycle_policies.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.legos.aws.aws_get_s3_buckets.aws_get_s3_buckets import aws_get_s3_buckets
from typing import List, Optional, Tuple
import pprint
class InputSchema(BaseModel):
    """Input schema: optional AWS region; an empty string means scan all regions."""
    region: Optional[str] = Field('', description='AWS Region of S3 buckets.', title='Region')
def aws_find_s3_buckets_without_lifecycle_policies_printer(output):
    """Pretty-print the check output; a None result is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_find_s3_buckets_without_lifecycle_policies(handle, region: str="") -> Tuple:
    """aws_find_s3_buckets_without_lifecycle_policies List all the S3 buckets without lifecycle policies

        :type handle: object
        :param handle: Object returned from task.validate(...).

        :type region: string
        :param region: AWS Region of the bucket

        :rtype: Status, List of all the S3 buckets without lifecycle policies with regions
    """
    offenders = []
    # An explicit region restricts the scan; otherwise every region is checked.
    regions_to_scan = [region] if region else aws_list_all_regions(handle)
    for current_region in regions_to_scan:
        try:
            s3_resource = handle.resource("s3", region_name=current_region)
            bucket_names = aws_get_s3_buckets(handle, region=current_region)
            for bucket_name in bucket_names:
                location = s3_resource.meta.client.get_bucket_location(Bucket=bucket_name)['LocationConstraint']
                # Buckets in us-east-1 report a null LocationConstraint.
                if location is None:
                    location = 'us-east-1'
                # Only report each bucket in its own home region.
                if location != current_region:
                    continue
                lifecycle_config = s3_resource.BucketLifecycleConfiguration(bucket_name)
                try:
                    # Accessing .rules raises when no lifecycle configuration exists.
                    if lifecycle_config.rules:
                        continue
                except Exception:
                    offenders.append({"bucket_name": bucket_name, "region": current_region})
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    if offenders:
        return (False, offenders)
    return (True, None)
================================================
FILE: AWS/legos/aws_finding_redundant_trails/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_finding_redundant_trails/__init__.py
================================================
================================================
FILE: AWS/legos/aws_finding_redundant_trails/aws_finding_redundant_trails.json
================================================
{
"action_title": "Finding Redundant Trails in AWS",
"action_description": "This action will find a redundant cloud trail if the attribute IncludeGlobalServiceEvents is true, and then we need to find multiple duplications.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_finding_redundant_trails",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_is_check":true,
"action_categories": ["CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_COST_OPT","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_CLOUDTRAIL"],
"action_next_hop": ["c4d55f5dd5bb964460f4ad7335daa8bb094792b0d64149dbddca019513f05598"],
"action_next_hop_parameter_mapping": {"c4d55f5dd5bb964460f4ad7335daa8bb094792b0d64149dbddca019513f05598": {"name": "AWS Lowering CloudTrail Costs by Removing Redundant Trails", "region": ".[].regions[0]"}}
}
================================================
FILE: AWS/legos/aws_finding_redundant_trails/aws_finding_redundant_trails.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Tuple
from pydantic import BaseModel
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    """This action takes no user inputs; AWS credentials come from the handle."""
    pass
def aws_finding_redundant_trails_printer(output):
    """Pretty-print the redundant-trails result; a None result is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_finding_redundant_trails(handle) -> Tuple:
    """aws_finding_redundant_trails Returns an array of redundant trails in AWS

    A trail with IncludeGlobalServiceEvents enabled is reported by
    describe_trails() in every region it is visible in, so the same trail
    name accumulates multiple regions in the output.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.

    :rtype: Tuple with check status and list of redundant trails
    """
    # Index entries by trail name so each region pass is O(1) per trail
    # (the previous implementation rescanned the result list twice per trail).
    trails_by_name = {}
    for reg in aws_list_all_regions(handle):
        try:
            cloudtrail_client = handle.client('cloudtrail', region_name=reg)
            response = cloudtrail_client.describe_trails()
            for trail in response["trailList"]:
                if trail["IncludeGlobalServiceEvents"] is True:
                    name = trail["Name"]
                    if name in trails_by_name:
                        trails_by_name[name]["regions"].append(reg)
                    else:
                        trails_by_name[name] = {"trail_name": name, "regions": [reg]}
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    # dict preserves insertion order, so output ordering matches the original.
    result = list(trails_by_name.values())
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_acount_number/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_alarms_list/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_alarms_list/aws_get_alarms_list.json
================================================
{
"action_title": "Get AWS CloudWatch Alarms List",
"action_description": "Get AWS CloudWatch Alarms List",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_alarms_list",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_alarms_list/aws_get_alarms_list.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    """Inputs: the target AWS region (required) and an optional alarm-name filter."""
    region: str = Field(
        title='Region',
        description='AWS Region of the cloudwatch.')
    alarm_name: Optional[str] = Field(
        title='Alarm Name',
        description='Name of the particular alarm in the cloudwatch.')
def aws_get_alarms_list_printer(output):
    """Pretty-print the alarms list; a None result is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_get_alarms_list(handle, region: str, alarm_name: str = None) -> List:
    """aws_get_alarms_list returns the CloudWatch alarms in a region.

    If alarm_name is given, only that alarm's details are returned;
    otherwise every alarm in the region is listed.
    (The previous docstring was copy-pasted from an EC2 action and described
    a non-existent instance_id parameter.)

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region of the cloudwatch.

    :type alarm_name: string
    :param alarm_name: Name of the particular alarm in the cloudwatch.

    :rtype: List of dicts with AlarmName, AlarmArn, Dimensions, AlarmDescription.
    """
    cloudwatchClient = handle.client('cloudwatch', region_name=region)
    result = []
    # If an alarm name is specified, restrict the paginated query to that alarm.
    if alarm_name is not None:
        res = aws_get_paginator(
            cloudwatchClient,
            "describe_alarms",
            "MetricAlarms",
            AlarmNames=[alarm_name]
        )
    else:
        res = aws_get_paginator(cloudwatchClient, "describe_alarms", "MetricAlarms")
    for alarm in res:
        result.append({
            'AlarmName': alarm['AlarmName'],
            'AlarmArn': alarm['AlarmArn'],
            'Dimensions': alarm['Dimensions'],
            # AlarmDescription is optional in the API response; default to "".
            'AlarmDescription': alarm.get('AlarmDescription', ""),
        })
    return result
================================================
FILE: AWS/legos/aws_get_alb_listeners_without_http_redirect/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_alb_listeners_without_http_redirect/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_alb_listeners_without_http_redirect/aws_get_alb_listeners_without_http_redirect.json
================================================
{
"action_title": "Get AWS ALB Listeners Without HTTP Redirection",
"action_description": "Get AWS ALB Listeners Without HTTP Redirection",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_alb_listeners_without_http_redirect",
"action_needs_credential": true,
"action_is_check": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELB" ],
"action_next_hop": ["7d87da036fb983f7909a22a01529790dddc5179ebbb8f95517a66314d236555c"],
"action_next_hop_parameter_mapping": {"7d87da036fb983f7909a22a01529790dddc5179ebbb8f95517a66314d236555c": {"name": "Enforce HTTP Redirection across all AWS ALB instances","region":".[0].region","alb_listener_arns":"map(.listener_arn)"}}
}
================================================
FILE: AWS/legos/aws_get_alb_listeners_without_http_redirect/aws_get_alb_listeners_without_http_redirect.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.legos.aws.aws_list_application_loadbalancers.aws_list_application_loadbalancers import aws_list_application_loadbalancers
class InputSchema(BaseModel):
    """Optional region filter; an empty string means scan every region."""
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region of the ALB listeners.')
def aws_get_alb_listeners_without_http_redirect_printer(output):
    """Pretty-print the check output; a None result is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_get_alb_listeners_without_http_redirect(handle, region: str = "") -> Tuple:
    """aws_get_alb_listeners_without_http_redirect List of ALB listeners without HTTP redirection.

        :type handle: object
        :param handle: Object returned from task.validate(...).

        :type region: string
        :param region: Region to filter ALB listeners.

        :rtype: Tuple of status result and list of ALB listeners without HTTP redirection.
    """
    flagged = []
    regions_to_scan = [region] if region else aws_list_all_regions(handle)
    # First pass: collect the ALB ARNs available in each reachable region.
    albs_by_region = []
    for reg in regions_to_scan:
        try:
            arns = aws_list_application_loadbalancers(handle, reg)
            albs_by_region.append({"region": reg, "alb_arn": arns})
        except Exception:
            pass
    # Second pass: inspect listeners and their rules for missing redirects.
    for entry in albs_by_region:
        try:
            elb_client = handle.client('elbv2', region_name=entry["region"])
            for lb_arn in entry["alb_arn"]:
                listeners = aws_get_paginator(elb_client, "describe_listeners", "Listeners",
                                              LoadBalancerArn=lb_arn)
                for listener in listeners:
                    # HTTPS listeners carry an SslPolicy; only plain listeners are checked.
                    if 'SslPolicy' in listener:
                        continue
                    rules = aws_get_paginator(elb_client, "describe_rules", "Rules",
                                              ListenerArn=listener['ListenerArn'])
                    for rule in rules:
                        for action in rule['Actions']:
                            if action['Type'] != 'redirect':
                                flagged.append({
                                    "region": entry["region"],
                                    "listener_arn": listener['ListenerArn'],
                                })
        except Exception:
            pass
    if flagged:
        return (False, flagged)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_all_ec2_instances/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://unskript.com)
================================================
FILE: AWS/legos/aws_get_all_ec2_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_all_ec2_instances/aws_get_all_ec2_instances.json
================================================
{
"action_title": "Get AWS EC2 Instances All ",
"action_description": "Use This Action to Get All AWS EC2 Instances",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_all_ec2_instances",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_get_all_ec2_instances/aws_get_all_ec2_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    """Region in which to list all EC2 instances."""
    region: str = Field(
        title='Region',
        # Fixed user-facing copy-paste error: this action targets EC2 instances,
        # not an ECS service.
        description='AWS Region of the EC2 instances.')
def aws_get_all_ec2_instances_printer(output):
    """Pretty-print the instance IDs under an "Instances" key; None is ignored."""
    if output is not None:
        pprint.pprint({"Instances": output})
def aws_get_all_ec2_instances(handle, region: str) -> List:
    """aws_get_all_ec2_instances Returns an array of instances.

        :type handle: object
        :param handle: Object returned from task.validate(...).

        :type region: string
        :param region: Region to filter instances.

        :rtype: Array of instances.
    """
    ec2_client = handle.client('ec2', region_name=region)
    reservations = aws_get_paginator(ec2_client, "describe_instances", "Reservations")
    # Flatten every reservation's instances into one list of instance IDs.
    return [
        instance['InstanceId']
        for reservation in reservations
        for instance in reservation['Instances']
    ]
================================================
FILE: AWS/legos/aws_get_all_load_balancers/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_all_load_balancers/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_all_load_balancers/aws_get_all_load_balancers.json
================================================
{
"action_title": "AWS Get All Load Balancers",
"action_description": "AWS Get All Load Balancers",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_all_load_balancers",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ELB"]
}
================================================
FILE: AWS/legos/aws_get_all_load_balancers/aws_get_all_load_balancers.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    """Optional AWS region; when omitted, all regions are scanned."""
    region: Optional[str] = Field(
        title='AWS Region',
        description='AWS Region.'
    )
def aws_get_all_load_balancers_printer(output):
    """Pretty-print the load balancer list; a None result is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_get_all_load_balancers(handle, region: str = "") -> List:
    """aws_get_all_load_balancers Returns an list of load balancer details.

        :type handle: object
        :param handle: Object returned from task.validate(...).

        :type region: string
        :param region: AWS Region.

        :rtype: List of load balancer details.
    """
    balancers = []
    regions_to_scan = [region] if region else aws_list_all_regions(handle)
    for reg in regions_to_scan:
        try:
            elb_client = handle.client('elbv2', region_name=reg)
            for lb in aws_get_paginator(elb_client, "describe_load_balancers", "LoadBalancers"):
                balancers.append({
                    "load_balancer_name": lb['LoadBalancerName'],
                    "load_balancer_arn": lb['LoadBalancerArn'],
                    "load_balancer_type": lb['Type'],
                    "load_balancer_dns": lb['DNSName'],
                    "region": reg,
                })
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    return balancers
================================================
FILE: AWS/legos/aws_get_all_service_names/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_all_service_names/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_all_service_names/aws_get_all_service_names.json
================================================
{
"action_title": "AWS Get All Service Names v3",
"action_description": "Get a list of all service names in a region",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_all_service_names",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS" ,"CATEGORY_TYPE_AWS" ]
}
================================================
FILE: AWS/legos/aws_get_all_service_names/aws_get_all_service_names.py
================================================
from __future__ import annotations
import pprint
from typing import List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from beartype import beartype
class InputSchema(BaseModel):
    """Region whose Service Quotas service names should be listed."""
    # Fixed typo in the user-facing description: "Regiob" -> "Region".
    region: str = Field(..., description='The AWS Region', title='region')
@beartype
def aws_get_all_service_names_printer(output):
    """Pretty-print the service list; a None result is silently ignored."""
    if output is not None:
        pprint.pprint(output)
@beartype
def aws_get_all_service_names(handle, region:str) -> List:
    """aws_get_all_service_names returns all Service Quotas service names in a region.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: The AWS Region to query.

    :rtype: List of service entries from the service-quotas list_services API.
    """
    sqClient = handle.client('service-quotas', region_name=region)
    # Paginate explicitly: a single list_services response is capped at 100 items.
    return aws_get_paginator(sqClient, 'list_services', 'Services', PaginationConfig={
        'MaxItems': 1000,
        'PageSize': 100
    })
================================================
FILE: AWS/legos/aws_get_all_untagged_resources/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_all_untagged_resources/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_all_untagged_resources/aws_get_all_untagged_resources.json
================================================
{
"action_title": "AWS Get Untagged Resources",
"action_description": "AWS Get Untagged Resources",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_all_untagged_resources",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_COST_OPT" ,"CATEGORY_TYPE_AWS" ]
}
================================================
FILE: AWS/legos/aws_get_all_untagged_resources/aws_get_all_untagged_resources.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    """Region in which to look for untagged resources."""
    region: str = Field(
        title='Region',
        description='AWS Region.')
def aws_get_all_untagged_resources_printer(output):
    """Pretty-print the untagged-resource ARNs; a None result is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_get_all_untagged_resources(handle, region: str) -> List:
    """aws_get_all_untagged_resources Returns an List of Untagged Resources.

        :type handle: object
        :param handle: Object returned from task.validate(...).

        :type region: str
        :param region: Region to filter resources.

        :rtype: List of untagged resources.
    """
    tagging_client = handle.client('resourcegroupstaggingapi', region_name=region)
    untagged = []
    try:
        mappings = aws_get_paginator(tagging_client, "get_resources", "ResourceTagMappingList")
        for mapping in mappings:
            # An empty Tags list marks the resource as untagged.
            if not mapping["Tags"]:
                untagged.append(mapping["ResourceARN"])
    except Exception as error:
        # Errors are reported in-band rather than raised.
        untagged.append({"error": error})
    return untagged
================================================
FILE: AWS/legos/aws_get_auto_scaling_instances/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_auto_scaling_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_auto_scaling_instances/aws_get_auto_scaling_instances.json
================================================
{
"action_title": "Get AWS AutoScaling Group Instances",
"action_description": "Use This Action to Get AWS AutoScaling Group Instances",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_auto_scaling_instances",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2","CATEGORY_TYPE_AWS_ASG" ]
}
================================================
FILE: AWS/legos/aws_get_auto_scaling_instances/aws_get_auto_scaling_instances.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    """Inputs for fetching the Auto Scaling group membership of EC2 instances."""
    instance_ids: list = Field(
        title='Instance IDs',
        description='List of instances.')
    region: str = Field(
        title='Region',
        # Fixed user-facing copy-paste error: this action targets Auto Scaling
        # instances, not an ECS service.
        description='AWS Region of the Auto Scaling instances.')
def aws_get_auto_scaling_instances_printer(output):
    """Render the instance/ASG pairs as a table; a None result is silently ignored."""
    if output is not None:
        print(tabulate(output, headers='keys'))
def aws_get_auto_scaling_instances(handle, instance_ids: list, region: str) -> List:
    """aws_get_auto_scaling_instances List of Dict with instanceId and attached groups.

        :type handle: object
        :param handle: Object returned from task.validate(...).

        :type instance_ids: list
        :param instance_ids: List of instances.

        :type region: string
        :param region: Region to filter instances.

        :rtype: List of Dict with instanceId and attached groups.
    """
    asg_client = handle.client('autoscaling', region_name=region)
    memberships = []
    try:
        response = asg_client.describe_auto_scaling_instances(InstanceIds=instance_ids)
        for entry in response["AutoScalingInstances"]:
            memberships.append({
                "InstanceId": entry["InstanceId"],
                "AutoScalingGroupName": entry["AutoScalingGroupName"],
            })
    except Exception as error:
        # Errors are reported in-band rather than raised.
        memberships.append({"Error": error})
    return memberships
================================================
FILE: AWS/legos/aws_get_bucket_size/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_bucket_size/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_bucket_size/aws_get_bucket_size.json
================================================
{
"action_title": "Get AWS Bucket Size",
"action_description": "Get an AWS Bucket Size",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_bucket_size",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3" ]
}
================================================
FILE: AWS/legos/aws_get_bucket_size/aws_get_bucket_size.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
import datetime
from pydantic import BaseModel, Field
from boto3.session import Session
## FIXME: make this a JSON schema rather than class
class InputSchema(BaseModel):
    """Name of the S3 bucket whose size should be reported."""
    bucketName: str = Field(
        title='Bucket Name',
        description='Name of the bucket.')
def aws_get_bucket_size_printer(output):
    """Pretty-print the bucket size string; a None result is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_get_bucket_size(handle: Session, bucketName: str) -> str:
    """aws_get_bucket_size Returns the size of the bucket.

        :type handle: Session
        :param handle: Handle to the boto3 session

        :type bucketName: string
        :param bucketName: Name of the bucket

        :rtype: String with the size of the bucket (in bytes, right-justified),
                or None when CloudWatch has no datapoints for the last 7 days.
    """
    now = datetime.datetime.now()
    # Need to get the region of the bucket first.
    s3Client = handle.client('s3')
    try:
        bucketLocationResp = s3Client.get_bucket_location(
            Bucket=bucketName
        )
        print("location of bucket: ", bucketLocationResp)
    except Exception as e:
        print(f"Could not get location for bucket {bucketName}, error {e}")
        raise e
    region = bucketLocationResp['LocationConstraint']
    # Bug fix: buckets in us-east-1 report a null LocationConstraint, which
    # previously created the CloudWatch client with region_name=None.
    if region is None:
        region = 'us-east-1'
    cw = handle.client('cloudwatch', region_name=region)
    # Gets the corresponding metrics from CloudWatch for bucket
    response = cw.get_metric_statistics(Namespace='AWS/S3',
                                        MetricName='BucketSizeBytes',
                                        Dimensions=[
                                            {'Name': 'BucketName', 'Value': bucketName},
                                            {'Name': 'StorageType', 'Value': 'StandardStorage'}
                                        ],
                                        Statistics=['Average'],
                                        Period=3600,
                                        StartTime=(now - datetime.timedelta(days=7)).isoformat(),
                                        EndTime=now.isoformat()
                                        )
    print(response)
    # Returns the first datapoint only. NOTE(review): datapoint ordering is not
    # guaranteed by the API — confirm whether the latest value is intended.
    for res in response["Datapoints"]:
        return str(f"{int(res['Average'])}").rjust(25)
================================================
FILE: AWS/legos/aws_get_cloudwatch_ebs/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_ebs/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_ebs/aws_get_cloudwatch_ebs.json
================================================
{
"action_title": "Get AWS EBS Metrics from Cloudwatch",
"action_description": "Get AWS CloudWatch Statistics for EBS volumes",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_ebs",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EBS","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_ebs/aws_get_cloudwatch_ebs.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, List
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_cloudwatch_enums import EBSMetrics
from unskript.enums.aws_k8s_enums import StatisticsType
from tabulate import tabulate
class InputSchema(BaseModel):
    """Inputs for plotting CloudWatch statistics of EBS volumes."""
    volumes: List[str] = Field(
        title="Volume",
        description="List of EBS volumes",
    )
    metric_name: EBSMetrics = Field(
        title="Metric",
        description=("The name of the EBS metric. Eg VolumeReadBytes|VolumeWriteBytes|VolumeReadOps"
                     "|VolumeWriteOps|VolumeTotalReadTime|VolumeTotalWriteTime|VolumeIdleTime"
                     "|VolumeQueueLength|VolumeThroughputPercentage|VolumeConsumedReadWriteOps|BurstBalance")
    )
    period: Optional[int] = Field(
        default=60,
        title="Period",
        description="The granularity, in seconds, of the returned data points.",
    )
    timeSince: int = Field(
        default=3600,
        title="Time Since",
        description=("Starting from now, window (in seconds) for which you want to get"
                     " the datapoints for.")
    )
    statistics: StatisticsType = Field(
        title="Statistics",
        description=("Cloudwatch metric statistics. Possible values: "
                     "SampleCount, Average, Sum, Minimum, Maximum.")
    )
    region: str = Field(
        title="Region",
        description="AWS Region of the cloudwatch.")
def aws_get_cloudwatch_ebs_printer(output):
    """Display the plot window, then print the tabulated statistics; None is ignored."""
    if output is not None:
        plt.show()
        pprint.pprint(output)
def aws_get_cloudwatch_ebs(
        hdl: Session,
        metric_name: EBSMetrics,
        volumes: List[str],
        region: str,
        timeSince: int,
        statistics: StatisticsType,
        period: int = 60,
) -> str:
    """aws_get_cloudwatch_ebs shows plotted AWS cloudwatch statistics for ebs.

        :type metric_name: EBSMetrics
        :param metric_name: The name of the metric, with or without spaces.

        :type volumes: List[str]
        :param volumes: List of EBS volumes

        :type period: int
        :param period: The granularity, in seconds, of the returned data points.

        :type timeSince: int
        :param timeSince: Starting from now, window (in seconds) for which you
        want to get the datapoints for.

        :type statistics: StatisticsType
        :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
        Average, Sum, Minimum, Maximum.

        :type region: string
        :param region: AWS Region of the cloudwatch.

        :rtype: Tabulated statistics as a string; the plot is shown by the printer.
    """
    metric_name = metric_name.value if metric_name else None
    statistics = statistics.value if statistics else None
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    name_space = "AWS/EBS"
    dimensions = [{"Name": "VolumeId", "Value": v}
                  for v in volumes]
    # Bug fix: capture "now" once so StartTime and EndTime describe a
    # consistent window (utcnow() was previously called twice).
    now = datetime.utcnow()
    res = cloudwatchClient.get_metric_statistics(
        Namespace=name_space,
        MetricName=metric_name,
        Dimensions=dimensions,
        Period=period,
        StartTime=now - timedelta(seconds=timeSince),
        EndTime=now,
        Statistics=[statistics],
    )
    # Index datapoint values by timestamp, then walk them chronologically.
    data = {dp["Timestamp"]: dp[statistics] for dp in res["Datapoints"]}
    times_stamps = sorted(data)
    sorted_values = []
    table_data = []
    for ts in times_stamps:
        table_data.append([ts, data[ts]])
        sorted_values.append(data[ts])
    table = tabulate(table_data, headers=["Timestamp", "Value"], tablefmt="grid")
    # Puts datapoints into the plot (displayed later by the printer).
    plt.plot_date(times_stamps, sorted_values, "-o")
    return table
================================================
FILE: AWS/legos/aws_get_cloudwatch_ec2/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_ec2/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_ec2/aws_get_cloudwatch_ec2.json
================================================
{
"action_title": "Get AWS EC2 Metrics from Cloudwatch",
"action_description": "Get AWS CloudWatch Metrics for EC2 instances. These could be CPU, Network, Disk based measurements",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_ec2",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_ec2/aws_get_cloudwatch_ec2.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_k8s_enums import StatisticsType
from unskript.enums.aws_cloudwatch_enums import EC2Metrics
from tabulate import tabulate
class InputSchema(BaseModel):
    # User-facing inputs for fetching a CloudWatch metric of one EC2 instance.
    instance: str = Field(
        description="AWS EC2 instance ID. Eg. i-abcd",
        title="Instances",
    )
    metric_name: EC2Metrics = Field(
        description=(
            "The name of the metric. Eg CPUUtilization|DiskReadOps|DiskWriteOps|"
            "DiskReadBytes|DiskWriteBytes|MetadataNoToken|NetworkIn|NetworkOut|"
            "NetworkPacketsIn|NetworkPacketsOut"
        ),
        title="Metric",
    )
    period: Optional[int] = Field(
        default=60,
        description="The granularity, in seconds, of the returned data points.",
        title="Period",
    )
    timeSince: int = Field(
        default=3600,
        description=(
            "Starting from now, window (in seconds) for which you want to get "
            "the datapoints for."
        ),
        title="Time Since",
    )
    statistics: StatisticsType = Field(
        description=(
            "Cloudwatch metric statistics. Possible values: SampleCount, "
            "Average, Sum, Minimum, Maximum."
        ),
        title="Statistics",
    )
    region: str = Field(
        description="AWS Region of the cloudwatch.",
        title="Region",
    )
def aws_get_cloudwatch_ec2_printer(output):
    """Render the current matplotlib figure and pretty-print the table output."""
    if output is not None:
        plt.show()
        pprint.pprint(output)
def aws_get_cloudwatch_ec2(
    hdl: Session,
    instance: str,
    metric_name: EC2Metrics,
    region: str,
    timeSince: int,
    statistics: StatisticsType,
    period: int = 60,
) -> str:
    """aws_get_cloudwatch_ec2 plots AWS CloudWatch statistics for an EC2
    instance and returns them rendered as a grid table.

    :type instance: string
    :param instance: AWS EC2 instance ID.
    :type metric_name: EC2Metrics
    :param metric_name: The name of the metric. Eg CPUUtilization, NetworkIn.
    :type region: string
    :param region: AWS Region of the cloudwatch.
    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you
          want to get the datapoints for.
    :type statistics: StatisticsType
    :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
          Average, Sum, Minimum, Maximum.
    :type period: int
    :param period: The granularity, in seconds, of the returned data points.
    :rtype: Tabulated timestamp/value datapoints (str); also draws the plot.
    """
    metric_name = metric_name.value if metric_name else None
    statistics = statistics.value if statistics else None
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    # Fetch the datapoints for the window [now - timeSince, now].
    res = cloudwatchClient.get_metric_statistics(
        Namespace="AWS/EC2",
        MetricName=metric_name,
        Dimensions=[{"Name": "InstanceId", "Value": instance}],
        Period=period,
        StartTime=datetime.utcnow() - timedelta(seconds=timeSince),
        EndTime=datetime.utcnow(),
        Statistics=[statistics],
    )
    # Datapoints are not guaranteed to arrive ordered; sort pairs by timestamp.
    data = {dp["Timestamp"]: dp[statistics] for dp in res["Datapoints"]}
    table_data = sorted(data.items())
    times_stamps = [ts for ts, _ in table_data]
    sorted_values = [val for _, val in table_data]
    head = ["Timestamp", "Value"]
    table = tabulate(table_data, headers=head, tablefmt="grid")
    # plt.plot handles datetime x-values natively; plot_date is deprecated
    # since matplotlib 3.5 and removed in 3.9.
    plt.plot(times_stamps, sorted_values, "-o")
    return table
================================================
FILE: AWS/legos/aws_get_cloudwatch_ec2_cpuutil/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_ec2_cpuutil/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_ec2_cpuutil/aws_get_cloudwatch_ec2_cpuutil.json
================================================
{
"action_title": "Get AWS EC2 CPU Utilization Statistics from Cloudwatch",
"action_description": "Get AWS CloudWatch Statistics for cpu utilization for EC2 instances",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_ec2_cpuutil",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_ec2_cpuutil/aws_get_cloudwatch_ec2_cpuutil.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from datetime import datetime, timedelta
from typing import Optional
import matplotlib.pyplot as plt
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_k8s_enums import StatisticsType
from tabulate import tabulate
class InputSchema(BaseModel):
    # User-facing inputs for fetching EC2 CPU-utilization statistics.
    instance: str = Field(
        description="AWS EC2 instance ID. Eg. i-abcd",
        title="Instance",
    )
    period: Optional[int] = Field(
        default=60,
        description="The granularity, in seconds, of the returned data points.",
        title="Period",
    )
    timeSince: int = Field(
        default=3600,
        description=(
            "Starting from now, window (in seconds) for which you want to get "
            "the datapoints for."
        ),
        title="Time Since",
    )
    statistics: StatisticsType = Field(
        default=StatisticsType.AVERAGE,
        description=(
            "Cloudwatch metric statistics. Possible values: SampleCount, "
            "Average, Sum, Minimum, Maximum."
        ),
        title="Statistics",
    )
    region: str = Field(
        description="AWS Region of the cloudwatch.",
        title="Region",
    )
def aws_get_cloudwatch_ec2_cpuutil_printer(output):
    """Pretty-print the tabulated CPU-utilization output, preceded by a blank line."""
    if output is not None:
        print("\n")
        pprint.pprint(output)
def aws_get_cloudwatch_ec2_cpuutil(
    hdl: Session,
    instance: str,
    region: str,
    timeSince: int = 3600,
    statistics: StatisticsType = StatisticsType.AVERAGE,
    period: int = 60,
) -> str:
    """aws_get_cloudwatch_ec2_cpuutil plots AWS CloudWatch statistics for EC2
    CPU utilization and returns them rendered as a grid table.

    :type instance: string
    :param instance: AWS EC2 instance ID.
    :type region: string
    :param region: AWS Region of the cloudwatch.
    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you
          want to get the datapoints for.
    :type statistics: StatisticsType
    :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
          Average, Sum, Minimum, Maximum.
    :type period: int
    :param period: The granularity, in seconds, of the returned data points.
    :rtype: Tabulated timestamp/value datapoints (str); also draws the plot.
    """
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    # Fetch CPUUtilization datapoints for the window [now - timeSince, now].
    res = cloudwatchClient.get_metric_statistics(
        Namespace="AWS/EC2",
        MetricName="CPUUtilization",
        Dimensions=[{"Name": "InstanceId", "Value": instance}],
        Period=period,
        StartTime=datetime.utcnow() - timedelta(seconds=timeSince),
        EndTime=datetime.utcnow(),
        Statistics=[statistics.value],
    )
    # Datapoints are not guaranteed to arrive ordered; sort pairs by timestamp.
    data = {dp["Timestamp"]: dp[statistics.value] for dp in res["Datapoints"]}
    table_data = sorted(data.items())
    times_stamps = [ts for ts, _ in table_data]
    sorted_values = [val for _, val in table_data]
    head = ["Timestamp", "Value"]
    table = tabulate(table_data, headers=head, tablefmt="grid")
    # plt.plot handles datetime x-values natively; plot_date is deprecated
    # since matplotlib 3.5 and removed in 3.9.
    plt.plot(times_stamps, sorted_values, "-o")
    return table
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_applicationelb/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_applicationelb/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_applicationelb/aws_get_cloudwatch_metrics_applicationelb.json
================================================
{
"action_title": "Get AWS CloudWatch Metrics for AWS/ApplicationELB",
"action_description": "Get AWS CloudWatch Metrics for AWS/ApplicationELB",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_metrics_applicationelb",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELB","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_applicationelb/aws_get_cloudwatch_metrics_applicationelb.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, List
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
from tabulate import tabulate
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_cloudwatch_enums import ApplicationELBMetrics
from unskript.enums.aws_k8s_enums import StatisticsType
class InputSchema(BaseModel):
    # User-facing inputs for fetching Application ELB CloudWatch metrics.
    metric_name: ApplicationELBMetrics = Field(
        description="The name of the metric, with or without spaces.",
        title="Metric Name",
    )
    dimensions: List[dict] = Field(
        description="A dimension is a name/value pair that is part of the identity of a metric.",
        title="Dimensions",
    )
    period: Optional[int] = Field(
        default=60,
        description="The granularity, in seconds, of the returned data points.",
        title="Period",
    )
    timeSince: int = Field(
        description=(
            "Starting from now, window (in seconds) for which you want to get "
            "the datapoints for."
        ),
        title="Time Since",
    )
    statistics: StatisticsType = Field(
        description=(
            "Cloudwatch metric statistics. Possible values: Average, Sum, "
            "Minimum, Maximum."
        ),
        title="Statistics",
    )
    region: str = Field(
        description="AWS Region of the cloudwatch.",
        title="Region",
    )
def aws_get_cloudwatch_metrics_applicationelb_printer(output):
    """Render the current matplotlib figure and pretty-print the table output."""
    if output is not None:
        plt.show()
        pprint.pprint(output)
def aws_get_cloudwatch_metrics_applicationelb(
    hdl: Session,
    metric_name: ApplicationELBMetrics,
    dimensions: List[dict],
    timeSince: int,
    statistics: StatisticsType,
    region: str,
    period: int = 60,
) -> str:
    """aws_get_cloudwatch_metrics_applicationelb plots AWS CloudWatch statistics
    for Application ELB and returns them rendered as a grid table.

    :type metric_name: ApplicationELBMetrics
    :param metric_name: The name of the metric, with or without spaces.
    :type dimensions: List[dict]
    :param dimensions: A dimension is a name/value pair that is part of the
          identity of a metric.
    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you want to
          get the datapoints for.
    :type statistics: StatisticsType
    :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
          Average, Sum, Minimum, Maximum.
    :type region: string
    :param region: AWS Region of the cloudwatch.
    :type period: int
    :param period: The granularity, in seconds, of the returned data points.
    :rtype: Tabulated timestamp/value datapoints (str); also draws the plot.
    """
    metric_name = metric_name.value if metric_name else None
    statistics = statistics.value if statistics else None
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    # Gets metric data for the window [now - timeSince, now].
    res = cloudwatchClient.get_metric_data(
        MetricDataQueries=[
            {
                'Id': metric_name.lower(),
                'MetricStat': {
                    'Metric': {
                        'Namespace': 'AWS/ApplicationELB',
                        'MetricName': metric_name,
                        'Dimensions': dimensions
                    },
                    'Period': period,
                    'Stat': statistics,
                },
            },
        ],
        StartTime=datetime.utcnow() - timedelta(seconds=timeSince),
        EndTime=datetime.utcnow(),
        ScanBy='TimestampAscending'
    )
    result = res['MetricDataResults'][0]
    # Keep each value attached to its own timestamp. The previous code sorted
    # the timestamp and value lists independently, which mis-paired values
    # with timestamps in the plotted series.
    pairs = sorted(zip(result['Timestamps'], result['Values']))
    timestamps = [ts for ts, _ in pairs]
    values = [val for _, val in pairs]
    # plt.plot handles datetime x-values natively; plot_date is deprecated
    # since matplotlib 3.5 and removed in 3.9.
    plt.plot(timestamps, values, "-o")
    data = [[ts.strftime('%Y-%m-%d::%H-%M'), val] for ts, val in pairs]
    head = ["Timestamp", "Value"]
    table = tabulate(data, headers=head, tablefmt="grid")
    return table
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_classic_elb/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_classic_elb/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_classic_elb/aws_get_cloudwatch_metrics_classic_elb.json
================================================
{
"action_title": "Get AWS CloudWatch Metrics for AWS/ELB",
"action_description": "Get AWS CloudWatch Metrics for Classic Loadbalancer",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_metrics_classic_elb",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELB","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_classic_elb/aws_get_cloudwatch_metrics_classic_elb.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, List
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
from tabulate import tabulate
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_k8s_enums import StatisticsType
from unskript.enums.aws_cloudwatch_enums import UnitsType, ClassicELBMetrics
def aws_get_cloudwatch_metrics_classic_elb_printer(output):
    """Render the current matplotlib figure and pretty-print the table output."""
    if output is not None:
        plt.show()
        pprint.pprint(output)
class InputSchema(BaseModel):
    # User-facing inputs for fetching Classic ELB CloudWatch metrics.
    metric_name: ClassicELBMetrics = Field(
        description="The name of the metric, with or without spaces.",
        title="Metric Name",
    )
    dimensions: List[dict] = Field(
        description="A dimension is a name/value pair that is part of the identity of a metric.",
        title="Dimensions",
    )
    period: Optional[int] = Field(
        default=60,
        description="The granularity, in seconds, of the returned data points.",
        title="Period",
    )
    timeSince: int = Field(
        description=(
            "Starting from now, window (in seconds) for which you want to get "
            "the datapoints for."
        ),
        title="Time Since",
    )
    statistics: StatisticsType = Field(
        description=(
            "Cloudwatch metric statistics. Possible values: Average, Sum, "
            "Minimum, Maximum."
        ),
        title="Statistics",
    )
    units: Optional[UnitsType] = Field(
        description="Unit of measure.",
        title="Units",
    )
    region: str = Field(
        description="AWS Region of the cloudwatch.",
        title="Region",
    )
def aws_get_cloudwatch_metrics_classic_elb(
    hdl: Session,
    metric_name: ClassicELBMetrics,
    dimensions: List[dict],
    timeSince: int,
    statistics: StatisticsType,
    region: str,
    units: UnitsType,
    period: int = 60,
) -> str:
    """aws_get_cloudwatch_metrics_classic_elb plots AWS CloudWatch statistics
    for Classic ELB and returns them rendered as a grid table.

    :type metric_name: ClassicELBMetrics
    :param metric_name: The name of the metric, with or without spaces.
    :type dimensions: List[dict]
    :param dimensions: A dimension is a name/value pair that is part of
          the identity of a metric.
    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you
          want to get the datapoints for.
    :type statistics: StatisticsType
    :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
          Average, Sum, Minimum, Maximum.
    :type region: string
    :param region: AWS Region of the cloudwatch.
    :type units: UnitsType
    :param units: Unit of measure.
    :type period: int
    :param period: The granularity, in seconds, of the returned data points.
    :rtype: Tabulated timestamp/value datapoints (str); also draws the plot.
    """
    metric_name = metric_name.value if metric_name else None
    statistics = statistics.value if statistics else None
    units = units.value if units else None
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    # Gets metric data for the window [now - timeSince, now].
    res = cloudwatchClient.get_metric_data(
        MetricDataQueries=[
            {
                'Id': metric_name.lower(),
                'MetricStat': {
                    'Metric': {
                        'Namespace': 'AWS/ELB',
                        'MetricName': metric_name,
                        'Dimensions': dimensions
                    },
                    'Period': period,
                    'Stat': statistics,
                    'Unit': units
                },
            },
        ],
        StartTime=datetime.utcnow() - timedelta(seconds=timeSince),
        EndTime=datetime.utcnow(),
        ScanBy='TimestampAscending')
    result = res['MetricDataResults'][0]
    # Keep each value attached to its own timestamp. The previous code sorted
    # the timestamp and value lists independently (and left a dead `dt = i`
    # assignment), which mis-paired values with timestamps in the plot.
    pairs = sorted(zip(result['Timestamps'], result['Values']))
    timestamps = [ts for ts, _ in pairs]
    values = [val for _, val in pairs]
    # plt.plot handles datetime x-values natively; plot_date is deprecated
    # since matplotlib 3.5 and removed in 3.9.
    plt.plot(timestamps, values, "-o")
    data = [[ts.strftime('%Y-%m-%d::%H-%M'), val] for ts, val in pairs]
    head = ["Timestamp", "Value"]
    table = tabulate(data, headers=head, tablefmt="grid")
    return table
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_dynamodb/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_ec2autoscaling/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_ec2autoscaling/aws_get_cloudwatch_metrics_ec2autoscaling.json
================================================
{
"action_title": "Get AWS CloudWatch Metrics for AWS/AutoScaling",
"action_description": "Get AWS CloudWatch Metrics for AWS EC2 AutoScaling groups",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_metrics_ec2autoscaling",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_ec2autoscaling/aws_get_cloudwatch_metrics_ec2autoscaling.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, List
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
from tabulate import tabulate
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_cloudwatch_enums import EC2AutoscalingMetrics
from unskript.enums.aws_k8s_enums import StatisticsType
class InputSchema(BaseModel):
    # User-facing inputs for fetching EC2 AutoScaling CloudWatch metrics.
    metric_name: EC2AutoscalingMetrics = Field(
        description="The name of the metric, with or without spaces.",
        title="Metric Name",
    )
    dimensions: List[dict] = Field(
        description="A dimension is a name/value pair that is part of the identity of a metric.",
        title="Dimensions",
    )
    period: Optional[int] = Field(
        default=60,
        description="The granularity, in seconds, of the returned data points.",
        title="Period",
    )
    timeSince: int = Field(
        description=(
            "Starting from now, window (in seconds) for which you want to get "
            "the datapoints for."
        ),
        title="Time Since",
    )
    statistics: StatisticsType = Field(
        description="Cloudwatch metric statistics",
        title="Statistics",
    )
    region: str = Field(
        description="AWS Region of the cloudwatch.",
        title="Region",
    )
def aws_get_cloudwatch_metrics_ec2autoscaling_printer(output):
    """Render the current matplotlib figure and pretty-print the table output."""
    if output is not None:
        plt.show()
        pprint.pprint(output)
def aws_get_cloudwatch_metrics_ec2autoscaling(
    hdl: Session,
    metric_name: EC2AutoscalingMetrics,
    dimensions: List[dict],
    timeSince: int,
    statistics: StatisticsType,
    region: str,
    period: int = 60,
) -> str:
    """aws_get_cloudwatch_metrics_ec2autoscaling plots AWS CloudWatch statistics
    for EC2 Autoscaling groups and returns them rendered as a grid table.

    :type metric_name: EC2AutoscalingMetrics
    :param metric_name: The name of the metric, with or without spaces.
    :type dimensions: List[dict]
    :param dimensions: A dimension is a name/value pair that is part of the
          identity of a metric.
    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you want
          to get the datapoints for.
    :type statistics: StatisticsType
    :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
          Average, Sum, Minimum, Maximum.
    :type region: string
    :param region: AWS Region of the cloudwatch.
    :type period: int
    :param period: The granularity, in seconds, of the returned data points.
    :rtype: Tabulated timestamp/value datapoints (str); also draws the plot.
    """
    metric_name = metric_name.value if metric_name else None
    statistics = statistics.value if statistics else None
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    # Gets metric data for the window [now - timeSince, now].
    res = cloudwatchClient.get_metric_data(
        MetricDataQueries=[
            {
                'Id': metric_name.lower(),
                'MetricStat': {
                    'Metric': {
                        'Namespace': 'AWS/AutoScaling',
                        'MetricName': metric_name,
                        'Dimensions': dimensions
                    },
                    'Period': period,
                    'Stat': statistics,
                },
            },
        ],
        StartTime=datetime.utcnow() - timedelta(seconds=timeSince),
        EndTime=datetime.utcnow(),
        ScanBy='TimestampAscending'
    )
    result = res['MetricDataResults'][0]
    # Keep each value attached to its own timestamp. The previous code sorted
    # the timestamp and value lists independently, which mis-paired values
    # with timestamps in the plotted series.
    pairs = sorted(zip(result['Timestamps'], result['Values']))
    timestamps = [ts for ts, _ in pairs]
    values = [val for _, val in pairs]
    # plt.plot handles datetime x-values natively; plot_date is deprecated
    # since matplotlib 3.5 and removed in 3.9.
    plt.plot(timestamps, values, "-o")
    data = [[ts.strftime('%Y-%m-%d::%H-%M'), val] for ts, val in pairs]
    head = ["Timestamp", "Value"]
    table = tabulate(data, headers=head, tablefmt="grid")
    return table
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_gatewayelb/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_gatewayelb/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_gatewayelb/aws_get_cloudwatch_metrics_gatewayelb.json
================================================
{
"action_title": "Get AWS CloudWatch Metrics for AWS/GatewayELB",
"action_description": "Get AWS CloudWatch Metrics for AWS/GatewayELB",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_metrics_gatewayelb",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELB","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_gatewayelb/aws_get_cloudwatch_metrics_gatewayelb.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, List
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
from tabulate import tabulate
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_cloudwatch_enums import GatewayELBMetrics
from unskript.enums.aws_k8s_enums import StatisticsType
class InputSchema(BaseModel):
    # User-facing inputs for fetching Gateway ELB CloudWatch metrics.
    metric_name: GatewayELBMetrics = Field(
        description="The name of the metric, with or without spaces.",
        title="Metric Name",
    )
    dimensions: List[dict] = Field(
        description="A dimension is a name/value pair that is part of the identity of a metric.",
        title="Dimensions",
    )
    period: Optional[int] = Field(
        default=60,
        description="The granularity, in seconds, of the returned data points.",
        title="Period",
    )
    timeSince: int = Field(
        description=(
            "Starting from now, window (in seconds) for which you want to get "
            "the datapoints for."
        ),
        title="Time Since",
    )
    statistics: StatisticsType = Field(
        description=(
            "Cloudwatch metric statistics. Possible values: Average, Sum, "
            "Minimum, Maximum."
        ),
        title="Statistics",
    )
    region: str = Field(
        description="AWS Region of the cloudwatch.",
        title="Region",
    )
def aws_get_cloudwatch_metrics_gatewayelb_printer(output):
    """Render the current matplotlib figure and pretty-print the table output."""
    if output is not None:
        plt.show()
        pprint.pprint(output)
def aws_get_cloudwatch_metrics_gatewayelb(
    hdl: Session,
    metric_name: GatewayELBMetrics,
    dimensions: List[dict],
    timeSince: int,
    statistics: StatisticsType,
    region: str,
    period: int = 60,
) -> str:
    """aws_get_cloudwatch_metrics_gatewayelb plots AWS CloudWatch statistics
    for Gateway ELB and returns them rendered as a grid table.

    :type metric_name: GatewayELBMetrics
    :param metric_name: The name of the metric, with or without spaces.
    :type dimensions: List[dict]
    :param dimensions: A dimension is a name/value pair that is part
          of the identity of a metric.
    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you
          want to get the datapoints for.
    :type statistics: StatisticsType
    :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
          Average, Sum, Minimum, Maximum.
    :type region: string
    :param region: AWS Region of the cloudwatch.
    :type period: int
    :param period: The granularity, in seconds, of the returned data points.
    :rtype: Tabulated timestamp/value datapoints (str); also draws the plot.
    """
    metric_name = metric_name.value if metric_name else None
    statistics = statistics.value if statistics else None
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    # Gets metric data for the window [now - timeSince, now].
    res = cloudwatchClient.get_metric_data(
        MetricDataQueries=[
            {
                'Id': metric_name.lower(),
                'MetricStat': {
                    'Metric': {
                        'Namespace': 'AWS/GatewayELB',
                        'MetricName': metric_name,
                        'Dimensions': dimensions
                    },
                    'Period': period,
                    'Stat': statistics,
                },
            },
        ],
        StartTime=datetime.utcnow() - timedelta(seconds=timeSince),
        EndTime=datetime.utcnow(),
        ScanBy='TimestampAscending'
    )
    result = res['MetricDataResults'][0]
    # Keep each value attached to its own timestamp. The previous code sorted
    # the timestamp and value lists independently, which mis-paired values
    # with timestamps in the plotted series.
    pairs = sorted(zip(result['Timestamps'], result['Values']))
    timestamps = [ts for ts, _ in pairs]
    values = [val for _, val in pairs]
    # plt.plot handles datetime x-values natively; plot_date is deprecated
    # since matplotlib 3.5 and removed in 3.9.
    plt.plot(timestamps, values, "-o")
    data = [[ts.strftime('%Y-%m-%d::%H-%M'), val] for ts, val in pairs]
    head = ["Timestamp", "Value"]
    table = tabulate(data, headers=head, tablefmt="grid")
    return table
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_lambda/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_lambda/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_lambda/aws_get_cloudwatch_metrics_lambda.json
================================================
{
"action_title": "Get AWS CloudWatch Metrics for AWS/Lambda",
"action_description": "Get AWS CloudWatch Metrics for AWS/Lambda",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_metrics_lambda",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_LAMBDA","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_lambda/aws_get_cloudwatch_metrics_lambda.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, List, Dict
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
from tabulate import tabulate
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_k8s_enums import StatisticsType
from unskript.enums.aws_cloudwatch_enums import LambdaMetrics
class InputSchema(BaseModel):
    # User-facing inputs for fetching AWS Lambda CloudWatch metrics.
    metric_name: LambdaMetrics = Field(
        description="The name of the metric, with or without spaces.",
        title="Metric Name",
    )
    dimensions: List[dict] = Field(
        description="A dimension is a name/value pair that is part of the identity of a metric.",
        title="Dimensions",
    )
    period: Optional[int] = Field(
        default=60,
        description="The granularity, in seconds, of the returned data points.",
        title="Period",
    )
    timeSince: int = Field(
        description=(
            "Starting from now, window (in seconds) for which you want to get "
            "the datapoints for."
        ),
        title="Time Since",
    )
    statistics: StatisticsType = Field(
        description=(
            "Cloudwatch metric statistics. Possible values: SampleCount, "
            "Average, Sum, Minimum, Maximum."
        ),
        title="Statistics",
    )
    region: str = Field(
        description="AWS Region of the cloudwatch.",
        title="Region",
    )
def aws_get_cloudwatch_metrics_lambda_printer(output) -> str:
    """Plot and pretty-print the [timestamps, values, table] output.

    Accepts either a single result list or a dict mapping iteration keys to
    such lists. Always returns a string ("") — the previous version fell
    through to `return None`, contradicting its own `-> str` annotation.
    """
    if output is None:
        return ""
    if isinstance(output, Dict):
        for key in output:
            # plt.plot handles datetime x-values natively; plot_date is
            # deprecated since matplotlib 3.5 and removed in 3.9.
            plt.plot(output[key][0], output[key][1], "-o")
            pprint.pprint(output[key][2])
            plt.show()
    else:
        plt.plot(output[0], output[1], "-o")
        pprint.pprint(output[2])
        plt.show()
    return ""
def aws_get_cloudwatch_metrics_lambda(
    hdl: Session,
    metric_name: LambdaMetrics,
    dimensions: List[dict],
    timeSince: int,
    statistics: StatisticsType,
    region: str,
    period: int = 60,
) -> List:
    """aws_get_cloudwatch_metrics_lambda fetches AWS CloudWatch statistics for
    Lambda and returns [timestamps, values, table] for the printer to plot.

    :type metric_name: LambdaMetrics
    :param metric_name: The name of the metric, with or without spaces.
    :type dimensions: List[dict]
    :param dimensions: A dimension is a name/value pair that is part of
          the identity of a metric.
    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you want
          to get the datapoints for.
    :type statistics: StatisticsType
    :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
          Average, Sum, Minimum, Maximum.
    :type region: string
    :param region: AWS Region of the cloudwatch.
    :type period: int
    :param period: The granularity, in seconds, of the returned data points.
    :rtype: List of [sorted timestamps, matching values, tabulated table str].
    """
    result = []
    cloudwatch_client = hdl.client("cloudwatch", region_name=region)
    statistics = statistics.value if statistics else None
    metric_name = metric_name.value if metric_name else None
    # Gets metric data for the window [now - timeSince, now].
    res = cloudwatch_client.get_metric_data(
        MetricDataQueries=[
            {
                'Id': metric_name.lower(),
                'MetricStat': {
                    'Metric': {
                        'Namespace': 'AWS/Lambda',
                        'MetricName': metric_name,
                        'Dimensions': dimensions
                    },
                    'Period': period,
                    'Stat': statistics
                },
            },
        ],
        StartTime=datetime.utcnow() - timedelta(seconds=timeSince),
        EndTime=datetime.utcnow(),
        ScanBy='TimestampAscending'
    )
    metric = res['MetricDataResults'][0]
    # Sort timestamp/value pairs together. The previous code sorted the two
    # lists independently, so the returned values no longer corresponded to
    # the returned timestamps.
    pairs = sorted(zip(metric['Timestamps'], metric['Values']))
    timestamps = [ts for ts, _ in pairs]
    values = [val for _, val in pairs]
    data = [[ts.strftime('%Y-%m-%d::%H-%M'), val] for ts, val in pairs]
    head = ["Timestamp", "Value"]
    table = tabulate(data, headers=head, tablefmt="grid")
    result.append(timestamps)
    result.append(values)
    result.append(table)
    return result
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_network_elb/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_network_elb/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_network_elb/aws_get_cloudwatch_metrics_network_elb.json
================================================
{
"action_title": "Get AWS CloudWatch Metrics for AWS/NetworkELB",
"action_description": "Get AWS CloudWatch Metrics for Network Loadbalancer",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_metrics_network_elb",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELB","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_network_elb/aws_get_cloudwatch_metrics_network_elb.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, List
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
from tabulate import tabulate
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_k8s_enums import StatisticsType
from unskript.enums.aws_cloudwatch_enums import UnitsType, NetworkELBMetrics
class InputSchema(BaseModel):
    """Input parameters for the Get AWS CloudWatch Metrics for AWS/NetworkELB action."""
    # CloudWatch metric to query within the AWS/NetworkELB namespace.
    metric_name: NetworkELBMetrics = Field(
        title="Metric Name",
        description="The name of the metric, with or without spaces.",
    )
    # Name/value pairs identifying the metric, per the CloudWatch
    # get_metric_data API (e.g. {"Name": ..., "Value": ...} dicts).
    dimensions: List[dict] = Field(
        title="Dimensions",
        description="A dimension is a name/value pair that is part of the identity of a metric.",
    )
    # Datapoint granularity in seconds; defaults to one minute.
    period: Optional[int] = Field(
        60,
        title="Period",
        description="The granularity, in seconds, of the returned data points.",
    )
    # Look-back window in seconds, ending now.
    timeSince: int = Field(
        title="Time Since",
        description=("Starting from now, window (in seconds) for which you want "
                     "to get the datapoints for.")
    )
    # Statistic to retrieve for each datapoint.
    statistics: StatisticsType = Field(
        title="Statistics",
        description=("Cloudwatch metric statistics. Possible values: Average, "
                     "Sum, Minimum, Maximum, SampleCount.")
    )
    # Optional unit of measure (no explicit default; pydantic treats
    # Optional fields without a default as None when omitted).
    units: Optional[UnitsType] = Field(
        title="Units",
        description="Unit of measure",
    )
    # AWS region whose CloudWatch data is queried.
    region: str = Field(
        title="Region", description="AWS Region of the cloudwatch.")
def aws_get_cloudwatch_metrics_network_elb_printer(output):
    """Render the pending matplotlib plot and pretty-print the action output.

    Does nothing when the action produced no output.
    """
    if output is not None:
        plt.show()
        pprint.pprint(output)
def aws_get_cloudwatch_metrics_network_elb(
    hdl: Session,
    metric_name: NetworkELBMetrics,
    dimensions: List[dict],
    timeSince: int,
    statistics: StatisticsType,
    region: str,
    units: UnitsType,
    period: int = 60,
) -> str:
    """aws_get_cloudwatch_metrics_network_elb plots AWS CloudWatch statistics
    for AWS/NetworkELB and returns them as a grid-formatted table.

    :type metric_name: NetworkELBMetrics
    :param metric_name: The name of the metric, with or without spaces.

    :type dimensions: List[dict]
    :param dimensions: A dimension is a name/value pair that is part of the
        identity of a metric.

    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you want
        to get the datapoints for.

    :type statistics: StatisticsType
    :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
        Average, Sum, Minimum, Maximum.

    :type region: string
    :param region: AWS Region of the cloudwatch.

    :type units: UnitsType
    :param units: Unit of measure.

    :type period: int
    :param period: The granularity, in seconds, of the returned data points.

    :rtype: String containing a grid-formatted table of timestamp/value pairs.
    """
    metric_name = metric_name.value if metric_name else None
    statistics = statistics.value if statistics else None
    units = units.value if units else None
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    # Fetch datapoints for the trailing `timeSince` seconds, oldest first.
    res = cloudwatchClient.get_metric_data(
        MetricDataQueries=[
            {
                'Id': metric_name.lower(),
                'MetricStat': {
                    'Metric': {
                        'Namespace': 'AWS/NetworkELB',
                        'MetricName': metric_name,
                        'Dimensions': dimensions
                    },
                    'Period': period,
                    'Stat': statistics,
                    'Unit': units
                },
            },
        ],
        StartTime=datetime.utcnow() - timedelta(seconds=timeSince),
        EndTime=datetime.utcnow(),
        ScanBy='TimestampAscending'
    )
    metric_result = res['MetricDataResults'][0]
    # Sort (timestamp, value) pairs together so each value stays attached to
    # its own timestamp. The previous code sorted the two lists independently,
    # which plotted values against the wrong timestamps.
    points = sorted(zip(metric_result['Timestamps'], metric_result['Values']))
    timestamps = [ts for ts, _ in points]
    values = [val for _, val in points]
    plt.plot_date(timestamps, values, "-o")
    data = [[ts.strftime('%Y-%m-%d::%H-%M'), val] for ts, val in points]
    head = ["Timestamp", "Value"]
    table = tabulate(data, headers=head, tablefmt="grid")
    return table
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_rds/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_rds/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_rds/aws_get_cloudwatch_metrics_rds.json
================================================
{
"action_title": "Get AWS CloudWatch Metrics for AWS/RDS",
"action_description": "Get AWS CloudWatch Metrics for AWS/RDS",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_metrics_rds",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_rds/aws_get_cloudwatch_metrics_rds.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, List
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
from tabulate import tabulate
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_cloudwatch_enums import RDSMetrics
from unskript.enums.aws_k8s_enums import StatisticsType
class InputSchema(BaseModel):
    """Input parameters for the Get AWS CloudWatch Metrics for AWS/RDS action."""
    # CloudWatch metric to query within the AWS/RDS namespace.
    metric_name: RDSMetrics = Field(
        title="Metric Name",
        description="The name of the metric, with or without spaces.",
    )
    # Name/value pairs identifying the metric, per the CloudWatch
    # get_metric_data API (e.g. {"Name": ..., "Value": ...} dicts).
    dimensions: List[dict] = Field(
        title="Dimensions",
        description="A dimension is a name/value pair that is part of the identity of a metric.",
    )
    # Datapoint granularity in seconds; defaults to one minute.
    period: Optional[int] = Field(
        60,
        title="Period",
        description="The granularity, in seconds, of the returned data points.",
    )
    # Look-back window in seconds, ending now.
    timeSince: int = Field(
        title="Time Since",
        description=("Starting from now, window (in seconds) for which you want "
                     "to get the datapoints for.")
    )
    # Statistic to retrieve for each datapoint.
    statistics: StatisticsType = Field(
        title="Statistics",
        description=("Cloudwatch metric statistics. Possible values: Average, "
                     "Sum, Minimum, Maximum.")
    )
    # AWS region whose CloudWatch data is queried.
    region: str = Field(
        title="Region", description="AWS Region of the cloudwatch.")
def aws_get_cloudwatch_metrics_rds_printer(output):
    """Render the pending matplotlib plot and pretty-print the action output.

    Does nothing when the action produced no output.
    """
    if output is not None:
        plt.show()
        pprint.pprint(output)
def aws_get_cloudwatch_metrics_rds(
    hdl: Session,
    metric_name: RDSMetrics,
    dimensions: List[dict],
    timeSince: int,
    statistics: StatisticsType,
    region: str,
    period: int = 60,
) -> str:
    """aws_get_cloudwatch_metrics_rds plots AWS CloudWatch statistics for
    AWS/RDS and returns them as a grid-formatted table.

    :type metric_name: RDSMetrics
    :param metric_name: The name of the metric, with or without spaces.

    :type dimensions: List[dict]
    :param dimensions: A dimension is a name/value pair that is part of
        the identity of a metric.

    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you want
        to get the datapoints for.

    :type statistics: StatisticsType
    :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
        Average, Sum, Minimum, Maximum.

    :type region: string
    :param region: AWS Region of the cloudwatch.

    :type period: int
    :param period: The granularity, in seconds, of the returned data points.

    :rtype: String containing a grid-formatted table of timestamp/value pairs.
    """
    metric_name = metric_name.value if metric_name else None
    statistics = statistics.value if statistics else None
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    # Fetch datapoints for the trailing `timeSince` seconds, oldest first.
    res = cloudwatchClient.get_metric_data(
        MetricDataQueries=[
            {
                'Id': metric_name.lower(),
                'MetricStat': {
                    'Metric': {
                        'Namespace': 'AWS/RDS',
                        'MetricName': metric_name,
                        'Dimensions': dimensions
                    },
                    'Period': period,
                    'Stat': statistics,
                },
            },
        ],
        StartTime=datetime.utcnow() - timedelta(seconds=timeSince),
        EndTime=datetime.utcnow(),
        ScanBy='TimestampAscending'
    )
    metric_result = res['MetricDataResults'][0]
    # Sort (timestamp, value) pairs together so each value stays attached to
    # its own timestamp. The previous code sorted the two lists independently,
    # which plotted values against the wrong timestamps.
    points = sorted(zip(metric_result['Timestamps'], metric_result['Values']))
    timestamps = [ts for ts, _ in points]
    values = [val for _, val in points]
    plt.plot_date(timestamps, values, "-o")
    data = [[ts.strftime('%Y-%m-%d::%H-%M'), val] for ts, val in points]
    head = ["Timestamp", "Value"]
    table = tabulate(data, headers=head, tablefmt="grid")
    return table
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_redshift/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_redshift/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_redshift/aws_get_cloudwatch_metrics_redshift.json
================================================
{
"action_title": "Get AWS CloudWatch Metrics for AWS/Redshift",
"action_description": "Get AWS CloudWatch Metrics for AWS/Redshift",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_metrics_redshift",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_REDSHIFT","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_redshift/aws_get_cloudwatch_metrics_redshift.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, List
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
from tabulate import tabulate
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_cloudwatch_enums import RedshiftMetrics
from unskript.enums.aws_k8s_enums import StatisticsType
class InputSchema(BaseModel):
    """Input parameters for the Get AWS CloudWatch Metrics for AWS/Redshift action."""
    # CloudWatch metric to query within the AWS/Redshift namespace.
    metric_name: RedshiftMetrics = Field(
        title="Metric Name",
        description="The name of the metric, with or without spaces.",
    )
    # Name/value pairs identifying the metric, per the CloudWatch
    # get_metric_data API (e.g. {"Name": ..., "Value": ...} dicts).
    dimensions: List[dict] = Field(
        title="Dimensions",
        description="A dimension is a name/value pair that is part of the identity of a metric.",
    )
    # Datapoint granularity in seconds; defaults to one minute.
    period: Optional[int] = Field(
        60,
        title="Period",
        description="The granularity, in seconds, of the returned data points.",
    )
    # Look-back window in seconds, ending now.
    timeSince: int = Field(
        title="Time Since",
        description=("Starting from now, window (in seconds) for which you want "
                     "to get the datapoints for.")
    )
    # Statistic to retrieve for each datapoint.
    statistics: StatisticsType = Field(
        title="Statistics",
        description=("Cloudwatch metric statistics. Possible values: Average, "
                     "Sum, Minimum, Maximum.")
    )
    # AWS region whose CloudWatch data is queried.
    region: str = Field(
        title="Region", description="AWS Region of the cloudwatch.")
def aws_get_cloudwatch_metrics_redshift_printer(output):
    """Render the pending matplotlib plot and pretty-print the action output.

    Does nothing when the action produced no output.
    """
    if output is not None:
        plt.show()
        pprint.pprint(output)
def aws_get_cloudwatch_metrics_redshift(
    hdl: Session,
    metric_name: RedshiftMetrics,
    dimensions: List[dict],
    timeSince: int,
    statistics: StatisticsType,
    region: str,
    period: int = 60,
) -> str:
    """aws_get_cloudwatch_metrics_redshift plots AWS CloudWatch statistics for
    AWS/Redshift and returns them as a grid-formatted table.

    :type metric_name: RedshiftMetrics
    :param metric_name: The name of the metric, with or without spaces.

    :type dimensions: List[dict]
    :param dimensions: A dimension is a name/value pair that is part of the
        identity of a metric.

    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you want
        to get the datapoints for.

    :type statistics: StatisticsType
    :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
        Average, Sum, Minimum, Maximum.

    :type region: string
    :param region: AWS Region of the cloudwatch.

    :type period: int
    :param period: The granularity, in seconds, of the returned data points.

    :rtype: String containing a grid-formatted table of timestamp/value pairs.
    """
    metric_name = metric_name.value if metric_name else None
    statistics = statistics.value if statistics else None
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    # Fetch datapoints for the trailing `timeSince` seconds, oldest first.
    res = cloudwatchClient.get_metric_data(
        MetricDataQueries=[
            {
                'Id': metric_name.lower(),
                'MetricStat': {
                    'Metric': {
                        'Namespace': 'AWS/Redshift',
                        'MetricName': metric_name,
                        'Dimensions': dimensions
                    },
                    'Period': period,
                    'Stat': statistics,
                },
            },
        ],
        StartTime=datetime.utcnow() - timedelta(seconds=timeSince),
        EndTime=datetime.utcnow(),
        ScanBy='TimestampAscending'
    )
    metric_result = res['MetricDataResults'][0]
    # Sort (timestamp, value) pairs together so each value stays attached to
    # its own timestamp. The previous code sorted the two lists independently,
    # which plotted values against the wrong timestamps.
    points = sorted(zip(metric_result['Timestamps'], metric_result['Values']))
    timestamps = [ts for ts, _ in points]
    values = [val for _, val in points]
    plt.plot_date(timestamps, values, "-o")
    data = [[ts.strftime('%Y-%m-%d::%H-%M'), val] for ts, val in points]
    head = ["Timestamp", "Value"]
    table = tabulate(data, headers=head, tablefmt="grid")
    return table
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_sqs/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_sqs/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_sqs/aws_get_cloudwatch_metrics_sqs.json
================================================
{
"action_title": "Get AWS CloudWatch Metrics for AWS/SQS",
"action_description": "Get AWS CloudWatch Metrics for AWS/SQS",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_metrics_sqs",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_SQS","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_metrics_sqs/aws_get_cloudwatch_metrics_sqs.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, List
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
from tabulate import tabulate
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_k8s_enums import StatisticsType
from unskript.enums.aws_cloudwatch_enums import UnitsType, SQSMetrics
class InputSchema(BaseModel):
    """Input parameters for the Get AWS CloudWatch Metrics for AWS/SQS action."""
    # CloudWatch metric to query within the AWS/SQS namespace.
    metric_name: SQSMetrics = Field(
        title="Metric Name",
        description="The name of the metric, with or without spaces.",
    )
    # Name/value pairs identifying the metric, per the CloudWatch
    # get_metric_data API (e.g. {"Name": ..., "Value": ...} dicts).
    dimensions: List[dict] = Field(
        title="Dimensions",
        description="A dimension is a name/value pair that is part of the identity of a metric.",
    )
    # Datapoint granularity in seconds; defaults to one minute.
    period: Optional[int] = Field(
        60,
        title="Period",
        description="The granularity, in seconds, of the returned data points.",
    )
    # Look-back window in seconds, ending now.
    timeSince: int = Field(
        title="Time Since",
        description=("Starting from now, window (in seconds) for which you want "
                     "to get the datapoints for.")
    )
    # Statistic to retrieve for each datapoint.
    statistics: StatisticsType = Field(
        title="Statistics",
        description=("Cloudwatch metric statistics. Possible values: Average, "
                     "Sum, Minimum, Maximum.")
    )
    # Unit of measure; required here (unlike the NetworkELB variant where it is optional).
    units: UnitsType = Field(
        title="Units",
        description="Unit of measure.",
    )
    # AWS region whose CloudWatch data is queried.
    region: str = Field(
        title="Region", description="AWS Region of the cloudwatch.")
def aws_get_cloudwatch_metrics_sqs_printer(output):
    """Render the pending matplotlib plot and pretty-print the action output.

    Does nothing when the action produced no output.
    """
    if output is not None:
        plt.show()
        pprint.pprint(output)
def aws_get_cloudwatch_metrics_sqs(
    hdl: Session,
    metric_name: SQSMetrics,
    dimensions: List[dict],
    timeSince: int,
    statistics: StatisticsType,
    region: str,
    units: UnitsType,
    period: int = 60,
) -> str:
    """aws_get_cloudwatch_metrics_sqs plots AWS CloudWatch statistics for
    AWS/SQS and returns them as a grid-formatted table.

    :type metric_name: SQSMetrics
    :param metric_name: The name of the metric, with or without spaces.

    :type dimensions: List[dict]
    :param dimensions: A dimension is a name/value pair that is part of
        the identity of a metric.

    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you want
        to get the datapoints for.

    :type statistics: StatisticsType
    :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
        Average, Sum, Minimum, Maximum.

    :type region: string
    :param region: AWS Region of the cloudwatch.

    :type units: UnitsType
    :param units: Unit of measure.

    :type period: int
    :param period: The granularity, in seconds, of the returned data points.

    :rtype: String containing a grid-formatted table of timestamp/value pairs.
    """
    metric_name = metric_name.value if metric_name else None
    statistics = statistics.value if statistics else None
    units = units.value if units else None
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    # Fetch datapoints for the trailing `timeSince` seconds, oldest first.
    res = cloudwatchClient.get_metric_data(
        MetricDataQueries=[
            {
                'Id': metric_name.lower(),
                'MetricStat': {
                    'Metric': {
                        'Namespace': 'AWS/SQS',
                        'MetricName': metric_name,
                        'Dimensions': dimensions
                    },
                    'Period': period,
                    'Stat': statistics,
                    'Unit': units
                },
            },
        ],
        StartTime=datetime.utcnow() - timedelta(seconds=timeSince),
        EndTime=datetime.utcnow(),
        ScanBy='TimestampAscending'
    )
    metric_result = res['MetricDataResults'][0]
    # Sort (timestamp, value) pairs together so each value stays attached to
    # its own timestamp. The previous code sorted the two lists independently,
    # which plotted values against the wrong timestamps.
    points = sorted(zip(metric_result['Timestamps'], metric_result['Values']))
    timestamps = [ts for ts, _ in points]
    values = [val for _, val in points]
    plt.plot_date(timestamps, values, "-o")
    data = [[ts.strftime('%Y-%m-%d::%H-%M'), val] for ts, val in points]
    head = ["Timestamp", "Value"]
    table = tabulate(data, headers=head, tablefmt="grid")
    return table
================================================
FILE: AWS/legos/aws_get_cloudwatch_statistics/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cloudwatch_statistics/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cloudwatch_statistics/aws_get_cloudwatch_statistics.json
================================================
{
"action_title": "Get AWS CloudWatch Statistics",
"action_description": "Get AWS CloudWatch Statistics",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cloudwatch_statistics",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_CLOUDWATCH" ]
}
================================================
FILE: AWS/legos/aws_get_cloudwatch_statistics/aws_get_cloudwatch_statistics.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, List
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
from unskript.legos.aws.aws_get_handle.aws_get_handle import Session
from unskript.enums.aws_k8s_enums import StatisticsType
from tabulate import tabulate
class InputSchema(BaseModel):
    """Input parameters for the Get AWS CloudWatch Statistics action."""
    # Namespace the metric belongs to, e.g. AWS/SQS or AWS/ECS.
    name_space: str = Field(
        title="Namespace",
        description="The namespace of the metric, with or without spaces. For eg: AWS/SQS, AWS/ECS",
    )
    # CloudWatch metric to query within the given namespace.
    metric_name: str = Field(
        title="Metric Name",
        description="The name of the metric, with or without spaces.",
    )
    # Name/value pairs identifying the metric, per the CloudWatch
    # get_metric_statistics API (e.g. {"Name": ..., "Value": ...} dicts).
    dimensions: List[dict] = Field(
        title="Dimensions",
        description="A dimension is a name/value pair that is part of the identity of a metric.",
    )
    # Datapoint granularity in seconds; defaults to one minute.
    period: Optional[int] = Field(
        60,
        title="Period",
        description="The granularity, in seconds, of the returned data points.",
    )
    # Look-back window in seconds, ending now.
    timeSince: int = Field(
        title="Time Since",
        description=("Starting from now, window (in seconds) for which you want "
                     "to get the datapoints for.")
    )
    # Statistic to retrieve for each datapoint.
    statistics: StatisticsType = Field(
        title="Statistics",
        description=("Cloudwatch metric statistics. Possible values: SampleCount, "
                     "Average, Sum, Minimum, Maximum.")
    )
    # AWS region whose CloudWatch data is queried.
    region: str = Field(
        title="Region", description="AWS Region of the cloudwatch.")
def aws_get_cloudwatch_statistics_printer(output):
    """Render the pending matplotlib plot and pretty-print the action output.

    Does nothing when the action produced no output.
    """
    if output is not None:
        plt.show()
        pprint.pprint(output)
def aws_get_cloudwatch_statistics(
    hdl: Session,
    name_space: str,
    metric_name: str,
    dimensions: List[dict],
    timeSince: int,
    statistics: StatisticsType,
    region: str,
    period: int = 60,
) -> str:
    """aws_get_cloudwatch_statistics plots AWS CloudWatch statistics for the
    given namespace/metric and returns them as a grid-formatted table.

    :type name_space: string
    :param name_space: The namespace of the metric, with or without spaces.
        For eg: AWS/SQS, AWS/ECS

    :type metric_name: string
    :param metric_name: The name of the metric, with or without spaces.

    :type dimensions: List[dict]
    :param dimensions: A dimension is a name/value pair that is part of the
        identity of a metric.

    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you want
        to get the datapoints for.

    :type statistics: StatisticsType
    :param statistics: Cloudwatch metric statistics. Possible values: SampleCount,
        Average, Sum, Minimum, Maximum.

    :type region: string
    :param region: AWS Region of the cloudwatch.

    :type period: int
    :param period: The granularity, in seconds, of the returned data points.

    :rtype: String containing a grid-formatted table of timestamp/value pairs.
    """
    statistics = statistics.value if statistics else None
    cloudwatchClient = hdl.client("cloudwatch", region_name=region)
    # Gets metric statistics for the trailing `timeSince` seconds.
    res = cloudwatchClient.get_metric_statistics(
        Namespace=name_space,
        MetricName=metric_name,
        Dimensions=dimensions,
        Period=period,
        StartTime=datetime.utcnow() - timedelta(seconds=timeSince),
        EndTime=datetime.utcnow(),
        Statistics=[statistics],
    )
    data = {}
    table_data = []
    # Map each datapoint's timestamp to its value for the requested statistic;
    # the dict keeps value and timestamp paired through the sort below.
    for datapoints in res["Datapoints"]:
        data[datapoints["Timestamp"]] = datapoints[statistics]
    # Sorts data chronologically by timestamp.
    data_keys = data.keys()
    times_stamps = list(data_keys)
    times_stamps.sort()
    sorted_values = []
    for value in times_stamps:
        table_data.append([value, data[value]])
        sorted_values.append(data[value])
    head = ["Timestamp", "Value"]
    table = tabulate(table_data, headers=head, tablefmt="grid")
    # Puts datapoints into the plot (shown later by the printer via plt.show()).
    plt.plot_date(times_stamps, sorted_values, "-o")
    return table
================================================
FILE: AWS/legos/aws_get_cost_for_all_services/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cost_for_all_services/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cost_for_all_services/aws_get_cost_for_all_services.json
================================================
{
"action_title": "AWS Get Costs For All Services",
"action_description": "Get Costs for all AWS services in a given time period.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cost_for_all_services",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_COST_EXPLORER"]
}
================================================
FILE: AWS/legos/aws_get_cost_for_all_services/aws_get_cost_for_all_services.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import datetime
from typing import List, Optional
from pydantic import BaseModel, Field
import tabulate
from dateutil.relativedelta import relativedelta
class InputSchema(BaseModel):
    """Input parameters for the AWS Get Costs For All Services action."""
    # Trailing window length in months; defaults to 1 (roughly the last 30 days).
    number_of_months: Optional[int] = Field(
        1,
        description=('Number of months to fetch the daily costs for. '
                     'Eg: 1 (This will fetch all the costs for the last 30 days)'),
        title='Number of Months',
    )
    # Explicit range start (YYYY-MM-DD); empty string means "not provided".
    start_date: Optional[str] = Field(
        '',
        description=('Start date to get the daily costs from. Note: '
                     'It should be given in YYYY-MM-DD format. Eg: 2023-04-11'),
        title='Start Date',
    )
    # Explicit range end (YYYY-MM-DD); empty string means "not provided".
    end_date: Optional[str] = Field(
        '',
        description=('End date till which daily costs are to be fetched. '
                     'Note: It should be given in YYYY-MM-DD format. Eg: 2023-04-11'),
        title='End Date',
    )
    # AWS region used to create the Cost Explorer client.
    region: str = Field(..., description='AWS region.', title='Region')
def aws_get_cost_for_all_services_printer(output):
    """Print the per-service daily costs as a fancy-grid table."""
    if output is None:
        return
    table_rows = [entry.values() for entry in output]
    print(tabulate.tabulate(
        table_rows,
        tablefmt="fancy_grid",
        headers=['Date', 'Service', 'Cost'],
    ))
def aws_get_cost_for_all_services(
    handle, region:str,
    number_of_months: int=1,
    start_date: str="",
    end_date:str=""
) -> List:
    """aws_get_cost_for_all_services returns the daily cost of every AWS
    service over a time window, via the Cost Explorer API.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.

    :type region: string
    :param region: AWS region used to create the Cost Explorer client.

    :type number_of_months: int
    :param number_of_months: Optional, trailing number of months to fetch
        daily costs for (used when no explicit date range is given). Default 1.

    :type start_date: string
    :param start_date: Optional, range start in YYYY-MM-DD format.

    :type end_date: string
    :param end_date: Optional, range end in YYYY-MM-DD format.

    :rtype: List of dicts of all costs per AWS service in the given time period.
    """
    # Prefer an explicit [start_date, end_date) range when both are supplied;
    # otherwise fall back to a trailing window of `number_of_months` months.
    # The previous ordering tested number_of_months first, which (with its
    # default of 1) made the explicit date range unreachable.
    if start_date and end_date:
        start = start_date
        end = end_date
    else:
        no_of_months = int(number_of_months) if number_of_months else 1
        end = datetime.date.today().strftime('%Y-%m-%d')
        start = (datetime.date.today() + relativedelta(months=-no_of_months)).strftime('%Y-%m-%d')
    total_cost = 0
    result = []
    CEclient = handle.client('ce', region_name=region)
    # Daily unblended cost, grouped by service. Any API error propagates to
    # the caller (the old try/except only re-raised).
    response = CEclient.get_cost_and_usage(
        TimePeriod = {
            'Start': start,
            'End': end
        },
        Granularity='DAILY',
        Metrics = [
            'UnblendedCost',
        ],
        GroupBy=[
            {
                'Type': 'DIMENSION',
                'Key': 'SERVICE'
            },
        ],
    )
    for daily_cost in response['ResultsByTime']:
        date = daily_cost['TimePeriod']['Start']
        for group in daily_cost['Groups']:
            cost_est = {}
            cost_est["date"] = date
            service_name = group['Keys'][0]
            service_cost = group['Metrics']['UnblendedCost']['Amount']
            cost_est["service_name"] = service_name
            cost_est["service_cost"] = service_cost
            total_cost += float(service_cost)
            result.append(cost_est)
    print(f"Total Cost: {total_cost}")
    return result
================================================
FILE: AWS/legos/aws_get_cost_for_data_transfer/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_cost_for_data_transfer/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_cost_for_data_transfer/aws_get_cost_for_data_transfer.json
================================================
{
"action_title": "AWS Get Costs For Data Transfer",
"action_description": "Get daily cost for Data Transfer in AWS",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_cost_for_data_transfer",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_COST_EXPLORER"]
}
================================================
FILE: AWS/legos/aws_get_cost_for_data_transfer/aws_get_cost_for_data_transfer.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import datetime
from typing import List, Optional
from pydantic import BaseModel, Field
import tabulate
from dateutil.relativedelta import relativedelta
class InputSchema(BaseModel):
    """Input parameters for the AWS Get Costs For Data Transfer action."""
    # Trailing window length in months; empty string means "not provided".
    # NOTE(review): the annotation is Optional[float] but the default is '' —
    # type and default disagree; confirm intended default (e.g. None).
    number_of_months: Optional[float] = Field(
        '',
        description=('Number of months to fetch the daily costs for. '
                     'Eg: 1 (This will fetch all the costs for the last 30 days)'),
        title='Number of Months',
    )
    # Explicit range start (YYYY-MM-DD); empty string means "not provided".
    start_date: Optional[str] = Field(
        '',
        description=('Start date to get the daily costs from. Note: '
                     'It should be given in YYYY-MM-DD format. Eg: 2023-04-11'),
        title='Start Date',
    )
    # Explicit range end (YYYY-MM-DD); empty string means "not provided".
    end_date: Optional[str] = Field(
        '',
        description=('End date till which daily costs are to be fetched. Note: '
                     'It should be given in YYYY-MM-DD format. Eg: 2023-04-11'),
        title='End Date',
    )
    # AWS region used to create the Cost Explorer client.
    region: str = Field(..., description='AWS region.', title='region')
def aws_get_cost_for_data_transfer_printer(output):
    """Print the daily data-transfer usage and cost as a fancy-grid table."""
    if output is None:
        return
    table_rows = [entry.values() for entry in output]
    print(tabulate.tabulate(
        table_rows,
        tablefmt="fancy_grid",
        headers=['Date', 'Usage Type', 'Total Usage Qty', 'Total Usage Cost'],
    ))
def aws_get_cost_for_data_transfer(
    handle,
    region:str,
    number_of_months: int="",
    start_date: str="",
    end_date:str=""
) -> List:
    """aws_get_cost_for_data_transfer returns daily spend on data transfer
    (DataTransfer-In-Bytes / DataTransfer-Out-Bytes), via the Cost Explorer API.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.

    :type region: string
    :param region: AWS region used to create the Cost Explorer client.

    :type number_of_months: int
    :param number_of_months: Optional, trailing number of months to fetch the
        daily costs for (used when no explicit date range is given).
        Eg: 1 (This will fetch all the costs for the last 30 days)

    :type start_date: string
    :param start_date: Optional, range start in YYYY-MM-DD format. Eg: 2023-03-11

    :type end_date: string
    :param end_date: Optional, range end in YYYY-MM-DD format. Eg: 2023-04-11

    :rtype: List of dicts with data transfer costs.
    """
    # Prefer an explicit [start_date, end_date) range when both are supplied;
    # otherwise fall back to a trailing window of `number_of_months` months
    # (default 1). The previous ordering tested number_of_months first, which
    # made the explicit date range unreachable when a month count was set.
    if start_date and end_date:
        start = start_date
        end = end_date
    else:
        no_of_months = int(number_of_months) if number_of_months else 1
        end = datetime.date.today().strftime('%Y-%m-%d')
        start = (datetime.date.today() + relativedelta(months=-no_of_months)).strftime('%Y-%m-%d')
    result = []
    CEclient = handle.client('ce', region_name=region)
    # Daily usage and blended cost, restricted to data-transfer usage types.
    # Any API error propagates to the caller (the old try/except only re-raised).
    response = CEclient.get_cost_and_usage(
        TimePeriod={
            'Start': start,
            'End': end
        },
        Granularity='DAILY',
        Metrics=[
            'UsageQuantity',
            'BlendedCost',
        ],
        GroupBy=[
            {
                'Type': 'DIMENSION',
                'Key': 'USAGE_TYPE'
            },
        ],
        Filter={
            'Dimensions': {
                'Key': 'USAGE_TYPE',
                'Values': [
                    'DataTransfer-Out-Bytes',
                    'DataTransfer-In-Bytes',
                ],
            },
        },
    )
    for daily_cost in response['ResultsByTime']:
        date = daily_cost['TimePeriod']['Start']
        total_cost = 0
        total_usage = 0
        for group in daily_cost['Groups']:
            cost_est = {}
            usage_type = group['Keys'][0]
            # Convert the reported quantity by 1024**4 (bytes -> TiB, presumably).
            usage_quantity = float(group['Metrics']['UsageQuantity']['Amount']) / (1024 ** 4)
            usage_cost = group['Metrics']['BlendedCost']['Amount']
            total_usage += usage_quantity
            total_cost += float(usage_cost)
            cost_est["date"] = date
            cost_est["usage_type"] = usage_type
            # NOTE(review): these are the day's running totals up to this
            # group, not per-group figures — confirm that is intended.
            cost_est["total_usage"] = round(total_usage,3)
            cost_est["total_cost"] = total_cost
            result.append(cost_est)
    return result
================================================
FILE: AWS/legos/aws_get_daily_total_spend/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_daily_total_spend/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_daily_total_spend/aws_get_daily_total_spend.json
================================================
{
"action_title": "AWS Get Daily Total Spend",
"action_description": "AWS get daily total spend from Cost Explorer",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_daily_total_spend",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_COST_EXPLORER"]
}
================================================
FILE: AWS/legos/aws_get_daily_total_spend/aws_get_daily_total_spend.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import datetime
from typing import List, Optional
from pydantic import BaseModel, Field
import tabulate
from dateutil.relativedelta import relativedelta
class InputSchema(BaseModel):
    # Input form for the "AWS Get Daily Total Spend" action. All three time
    # inputs are optional; the action defaults to the last month when none is set.
    # NOTE(review): the '' default on the int/str fields is presumably treated as
    # "unset" by the caller — confirm against the action entry function.
    # Number of whole months of daily costs to fetch, counted back from today.
    number_of_months: Optional[int] = Field(
        '',
        description=('Number of months to fetch the daily costs for. '
                     'Eg: 1 (This will fetch all the costs for the last 30 days)'),
        title='Number of months',
    )
    # Inclusive start of the cost window, formatted YYYY-MM-DD.
    start_date: Optional[str] = Field(
        '',
        description=('Start date to get the daily costs from. Note: '
                     'It should be given in YYYY-MM-DD format. Eg: 2023-03-11'),
        title='Start Date',
    )
    # End of the cost window, formatted YYYY-MM-DD.
    end_date: Optional[str] = Field(
        '',
        description=('End date till which daily costs are to be fetched. '
                     'Note: It should be given in YYYY-MM-DD format. Eg: 2023-04-11'),
        title='End Date',
    )
    # AWS region used to create the Cost Explorer client (required).
    region: str = Field(..., description='AWS region.', title='Region')
def aws_get_daily_total_spend_printer(output):
    """Pretty-print the per-day spend entries as a fancy-grid table."""
    if output is None:
        return
    table_rows = []
    for entry in output:
        table_rows.append(entry.values())
    print(tabulate.tabulate(table_rows, tablefmt="fancy_grid", headers=['Date', 'Cost']))
def aws_get_daily_total_spend(
        handle, region: str,
        number_of_months: int = "",
        start_date: str = "",
        end_date: str = ""
    ) -> List:
    """aws_get_daily_total_spend returns daily cost spendings from Cost Explorer.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.
    :type region: string
    :param region: AWS Region.
    :type number_of_months: int
    :param number_of_months: Optional, Number of months to fetch the daily costs for.
        Eg: 1 (This will fetch all the costs for the last 30 days)
    :type start_date: string
    :param start_date: Optional, Start date to get the daily costs from.
        Note: It should be given in YYYY-MM-DD format. Eg: 2023-03-11
    :type end_date: string
    :param end_date: Optional, End date till which daily costs are to be fetched.
        Note: It should be given in YYYY-MM-DD format. Eg: 2023-04-11
    :rtype: List of dicts with costs on the respective dates
    """
    today = datetime.date.today()
    if number_of_months:
        # An explicit month count wins over any supplied dates.
        months_back = int(number_of_months)
        start = (today + relativedelta(months=-months_back)).strftime('%Y-%m-%d')
        end = today.strftime('%Y-%m-%d')
    elif not start_date and not end_date:
        # Nothing supplied: default to the last month of data.
        # (number_of_months is already known falsy here, so the original's
        # extra `and not number_of_months` test was redundant.)
        start = (today + relativedelta(months=-1)).strftime('%Y-%m-%d')
        end = today.strftime('%Y-%m-%d')
    else:
        start = start_date
        end = end_date
    client = handle.client('ce', region_name=region)
    # The original wrapped this call in `try: ... except Exception as e: raise e`,
    # a no-op handler; API errors now propagate directly.
    response = client.get_cost_and_usage(
        TimePeriod={'Start': start, 'End': end},
        Granularity='DAILY',
        Metrics=['BlendedCost'],
    )
    result = []
    for daily_cost in response['ResultsByTime']:
        result.append({
            "date": daily_cost['TimePeriod']['Start'],
            "cost": daily_cost['Total']['BlendedCost']['Amount'],
        })
    return result
================================================
FILE: AWS/legos/aws_get_ebs_volume_for_low_usage/README.md
================================================
# AWS Get EBS Volumes for Low Usage
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_ebs_volume_for_low_usage/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_ebs_volume_for_low_usage/aws_get_ebs_volume_for_low_usage.json
================================================
{
"action_title": "AWS Get EBS Volumes for Low Usage",
  "action_description": "This action lists low-usage volumes from AWS which used <10% capacity within the given threshold days.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_ebs_volume_for_low_usage",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_OBJECT",
"action_is_check":true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS"],
"action_next_hop": ["c9e1563d58cd6e3778a6c3fb11643498e3cdf3965a18c09214423998d62847b8"],
"action_next_hop_parameter_mapping": {"c9e1563d58cd6e3778a6c3fb11643498e3cdf3965a18c09214423998d62847b8": {"name": "Delete EBS Volume With Low Usage", "region": ".[0].region", "volume_ids":"map(.volume_id)"}}
}
================================================
FILE: AWS/legos/aws_get_ebs_volume_for_low_usage/aws_get_ebs_volume_for_low_usage.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    # Optional AWS region; when empty, every region is scanned.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region.')
    # Look-back window (days) for the CloudWatch usage metrics.
    threshold_days: Optional[int] = Field(
        default=10,
        title='Threshold (In days)',
        description='(in days) The threshold to check the EBS volume usage within given days.')
    # Usage percentage under which a volume is reported as low-usage.
    # Fixed user-facing typo: 'Minium' -> 'Minimum'.
    threshold_usage_percent: Optional[int] = Field(
        default=10,
        title='Minimum usage percent',
        description='This is the threshold usage percent, below which it will be considered a low usage.')
def aws_get_ebs_volume_for_low_usage_printer(output):
    """Pretty-print the low-usage EBS volume check result."""
    if output is not None:
        pprint.pprint(output)
def aws_get_ebs_volume_for_low_usage(handle, region: str = "", threshold_days: int = 10, threshold_usage_percent: int = 10) -> Tuple:
    """aws_get_ebs_volume_for_low_usage returns EBS volumes whose usage is below the threshold.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.
    :type region: string
    :param region: AWS Region. When empty, all regions are scanned.
    :type threshold_days: int
    :param threshold_days: (in days) The threshold to check the EBS volume usage within given days.
    :type threshold_usage_percent: int
    :param threshold_usage_percent: (in percent) Volumes whose usage is below
        this threshold are reported as low-usage.
    :rtype: Tuple with status result and list of low-usage EBS volumes.
    """
    result = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    # The metric window is the same for every region/volume, so compute it once.
    now = datetime.utcnow()
    days_ago = now - timedelta(days=threshold_days)
    for reg in all_regions:
        try:
            ec2Client = handle.client('ec2', region_name=reg)
            # One CloudWatch client per region — the original recreated it for
            # every volume even though it is loop-invariant.
            cloudwatch = handle.client('cloudwatch', region_name=reg)
            volumes = aws_get_paginator(ec2Client, "describe_volumes", "Volumes")
            for volume in volumes:
                volume_id = volume["VolumeId"]
                volume_size = volume['Size']
                usage_bytes = _volume_usage_bytes(cloudwatch, volume_id, days_ago, now)
                if usage_bytes is None:
                    # No datapoints at all for this volume — skip it,
                    # matching the original behavior.
                    continue
                # Size is reported in GiB; convert to bytes for the ratio.
                usage_percent = usage_bytes / (volume_size * 1024 * 1024 * 1024) * 100
                if usage_percent < threshold_usage_percent:
                    result.append({"volume_id": volume_id, "region": reg})
        except Exception:
            # Best-effort scan: skip regions that cannot be queried
            # (permissions, opt-in regions, etc.).
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)

def _volume_usage_bytes(cloudwatch, volume_id, start, end):
    """Return total read+write bytes for a volume over [start, end],
    or None when neither metric has any datapoints."""
    total = 0
    have_datapoints = False
    for metric_name in ('VolumeReadBytes', 'VolumeWriteBytes'):
        data = cloudwatch.get_metric_statistics(
            Namespace='AWS/EBS',
            MetricName=metric_name,
            Dimensions=[{'Name': 'VolumeId', 'Value': volume_id}],
            StartTime=start,
            EndTime=end,
            Period=86400,
            Statistics=['Sum']
        )
        if data['Datapoints']:
            have_datapoints = True
            # Single daily-period Sum datapoint, as in the original.
            total += data['Datapoints'][0]['Sum']
    return total if have_datapoints else None
================================================
FILE: AWS/legos/aws_get_ebs_volumes_by_type/README.md
================================================
# Get EBS Volumes By Type
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_ebs_volumes_by_type/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_ebs_volumes_by_type/aws_get_ebs_volumes_by_type.json
================================================
{
"action_title": "Get EBS Volumes By Type",
"action_description": "Get EBS Volumes By Type",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_ebs_volumes_by_type",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2","CATEGORY_TYPE_AWS_EBC", "CATEGORY_TYPE_COST_OPT" ]
}
================================================
FILE: AWS/legos/aws_get_ebs_volumes_by_type/aws_get_ebs_volumes_by_type.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # AWS region whose EBS volumes will be grouped by volume type.
    region: str = Field(
        title='Region',
        description='AWS Region.')
def aws_get_ebs_volumes_by_type_printer(output):
    """Pretty-print the volume-type -> volume-IDs mapping."""
    if output is not None:
        pprint.pprint(output)
def aws_get_ebs_volumes_by_type(handle, region: str) -> Dict:
    """aws_get_ebs_volumes_by_type returns a dict mapping each EBS volume type to its volume IDs.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: AWS Region.
    :rtype: Dict mapping volume type (e.g. 'gp2', 'gp3') to a list of volume IDs.
    """
    result = {}
    try:
        ec2Client = handle.resource('ec2', region_name=region)
        # Group every volume ID under its volume type.
        for volume in ec2Client.volumes.all():
            result.setdefault(volume.volume_type, []).append(volume.id)
    except Exception:
        # Re-raise the original exception instead of wrapping it in a bare
        # Exception (the original `raise Exception(e) from e` discarded the
        # concrete exception type while callers still catch Exception).
        raise
    return result
================================================
FILE: AWS/legos/aws_get_ebs_volumes_without_gp3_type/README.md
================================================
# Get AWS EBS Volume Without GP3 Type
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_ebs_volumes_without_gp3_type/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_ebs_volumes_without_gp3_type/aws_get_ebs_volumes_without_gp3_type.json
================================================
{
"action_title": "Get AWS EBS Volume Without GP3 Type",
"action_description": "AWS recently introduced the General Purpose SSD (gp3) volume type, which is designed to provide higher IOPS performance at a lower cost than the gp2 volume type.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_ebs_volumes_without_gp3_type",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check":true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS"],
"action_next_hop": ["2475714639442a9adcd0a87f7d193f6e8a6bbb9537d1eb6b03a6befb8ef84b19"],
"action_next_hop_parameter_mapping": {"2475714639442a9adcd0a87f7d193f6e8a6bbb9537d1eb6b03a6befb8ef84b19": {"name": "Change AWS EBS Volume To GP3 Type", "region":".[0].region","ebs_volume_ids":"map(.volume_id)"}}
}
================================================
FILE: AWS/legos/aws_get_ebs_volumes_without_gp3_type/aws_get_ebs_volumes_without_gp3_type.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    # Optional AWS region; when empty, every region is scanned.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region.')
def aws_get_ebs_volumes_without_gp3_type_printer(output):
    """Pretty-print the non-gp3 volume check result."""
    if output is not None:
        pprint.pprint(output)
def aws_get_ebs_volumes_without_gp3_type(handle, region: str = "") -> Tuple:
    """aws_get_ebs_volumes_without_gp3_type returns EBS volumes that are not of type gp3.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: Used to filter the volume for specific region. When empty,
        all regions are scanned.
    :rtype: Tuple with status result and list of EBS Volume without GP3 type.
    """
    non_gp3_volumes = []
    regions_to_scan = [region] if region else aws_list_all_regions(handle)
    for current_region in regions_to_scan:
        try:
            ec2_resource = handle.resource('ec2', region_name=current_region)
            for vol in ec2_resource.volumes.all():
                if vol.volume_type == "gp3":
                    continue
                non_gp3_volumes.append({
                    "region": current_region,
                    "volume_id": vol.id,
                    "volume_type": vol.volume_type,
                })
        except Exception:
            # Best-effort: skip regions that cannot be queried.
            pass
    if non_gp3_volumes:
        return (False, non_gp3_volumes)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_ec2_cpu_consumption/README.md
================================================
# Get EC2 CPU Consumption For All Instances
## See it in Action
You can see this Lego in action following this link [unSkript Sandbox](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_ec2_cpu_consumption/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_ec2_cpu_consumption/aws_get_ec2_cpu_consumption.json
================================================
{
"action_title": "Get EC2 CPU Consumption For All Instances",
"action_description": "Get EC2 CPU Consumption For All Instances",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_ec2_cpu_consumption",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_get_ec2_cpu_consumption/aws_get_ec2_cpu_consumption.py
================================================
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
## written by Doug Sillars with the aid of ChatGPT
##read the blog https://unskript.com/will-ai-replace-us-using-chatgpt-to-create-python-actions-for-unskript/
##
import pprint
from typing import Dict
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from beartype import beartype
@beartype
def aws_get_ec2_cpu_consumption_printer(output):
    """Pretty-print the per-instance average CPU utilization map."""
    if output is not None:
        pprint.pprint(output)
class InputSchema(BaseModel):
    # Region whose EC2 instances are queried for CPU metrics.
    # Fixed copy-paste error: this action reads EC2 metrics, not ECS.
    region: str = Field(
        title='Region',
        description='AWS Region of the EC2 instances')
@beartype
def aws_get_ec2_cpu_consumption(handle, region: str) -> Dict:
    """aws_get_ec2_cpu_consumption returns the average CPU utilization over the
    last 24 hours for every EC2 instance in the region.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: AWS Region to query.
    :rtype: Dict mapping instance ID to average CPU percent, or the string
        "error" when CloudWatch has no datapoints for that instance.
    """
    ec2Client = handle.client('ec2', region_name=region)
    cw = handle.client('cloudwatch', region_name=region)
    res = aws_get_paginator(ec2Client, "describe_instances", "Reservations")
    # Use UTC: the timestamps below are formatted with a 'Z' (UTC) suffix and
    # CloudWatch interprets them as UTC. The original used datetime.now()
    # (local time), which shifted the 24h window on non-UTC hosts.
    now = datetime.utcnow()
    yesterday = now - timedelta(hours=24)
    start_time = yesterday.strftime('%Y-%m-%dT%H:%M:%SZ')
    end_time = now.strftime('%Y-%m-%dT%H:%M:%SZ')
    results = {}
    for reservation in res:
        for instance in reservation['Instances']:
            instance_id = instance['InstanceId']
            # Hourly average CPU datapoints for the last 24 hours.
            response = cw.get_metric_statistics(
                Namespace='AWS/EC2',
                MetricName='CPUUtilization',
                Dimensions=[
                    {
                        'Name': 'InstanceId',
                        'Value': instance_id
                    },
                ],
                StartTime=start_time,
                EndTime=end_time,
                Period=3600,
                Statistics=['Average']
            )
            datapoints = response['Datapoints']
            if datapoints:
                values = [dp['Average'] for dp in datapoints]
                results[instance_id] = sum(values) / len(values)
            else:
                # No metrics reported for this instance in the window.
                results[instance_id] = "error"
    return results
================================================
FILE: AWS/legos/aws_get_ec2_data_traffic/README.md
================================================
# Get EC2 Data Traffic In and Out For All Instances
## See it in Action
You can see this Lego in action following this link [unSkript Sandbox](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_ec2_data_traffic/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_ec2_data_traffic/aws_get_ec2_data_traffic.json
================================================
{
"action_title": "Get EC2 Data Traffic In and Out For All Instances",
"action_description": "Get EC2 Data Traffic In and Out For All Instances",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_ec2_data_traffic",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_get_ec2_data_traffic/aws_get_ec2_data_traffic.py
================================================
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
## written by Doug Sillars with the aid of ChatGPT
##read the blog https://unskript.com/will-ai-replace-us-using-chatgpt-to-create-python-actions-for-unskript/
##
##
import pprint
from typing import Dict
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from beartype import beartype
@beartype
def aws_get_ec2_data_traffic_printer(output):
    """Pretty-print the per-instance NetworkIn/NetworkOut map."""
    if output is not None:
        pprint.pprint(output)
class InputSchema(BaseModel):
    # Region whose EC2 instances are queried for network traffic metrics.
    # Fixed copy-paste error: this action reads EC2 metrics, not ECS.
    region: str = Field(
        title='Region',
        description='AWS Region of the EC2 instances')
@beartype
def aws_get_ec2_data_traffic(handle, region: str) -> Dict:
    """Return NetworkIn/NetworkOut over the last hour, in MB, for every EC2
    instance in the region.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: AWS Region to query.
    :rtype: Dict mapping instance ID to {metric label: MB or "error"}.
    """
    ec2Client = handle.client('ec2', region_name=region)
    cloudwatch = handle.client('cloudwatch', region_name=region)
    reservations = aws_get_paginator(ec2Client, "describe_instances", "Reservations")
    # Query window: the last hour, in UTC.
    time_range = {
        'StartTime': datetime.utcnow() - timedelta(hours=1),
        'EndTime': datetime.utcnow()
    }

    def _metric_query(query_id, metric_name, dimensions):
        # One hourly Sum query for a single EC2 network metric.
        return {
            'Id': query_id,
            'MetricStat': {
                'Metric': {
                    'Namespace': 'AWS/EC2',
                    'MetricName': metric_name,
                    'Dimensions': dimensions
                },
                'Period': 3600,
                'Stat': 'Sum',
                'Unit': 'Bytes'
            }
        }

    def _to_megabytes(series):
        # Bytes are hard to read; report MB rounded to 2 decimal places,
        # or "error" when the metric returned no values.
        if len(series['Values']) > 0:
            return round(float(series['Values'][0]) / 1024 / 1024, 2)
        return "error"

    result = {}
    for reservation in reservations:
        for instance in reservation['Instances']:
            instance_id = instance['InstanceId']
            dimensions = [
                {
                    'Name': 'InstanceId',
                    'Value': instance_id
                }
            ]
            metrics = cloudwatch.get_metric_data(
                MetricDataQueries=[
                    _metric_query('m1', 'NetworkIn', dimensions),
                    _metric_query('m2', 'NetworkOut', dimensions),
                ],
                StartTime=time_range['StartTime'],
                EndTime=time_range['EndTime']
            )
            in_series = metrics['MetricDataResults'][0]
            out_series = metrics['MetricDataResults'][1]
            result[instance_id] = {
                in_series['Label']: _to_megabytes(in_series),
                out_series['Label']: _to_megabytes(out_series)
            }
    return result
================================================
FILE: AWS/legos/aws_get_ec2_instance_age/README.md
================================================
# Get Age of all EC2 Instances in Days
## See it in Action
You can see this Lego in action following this link [unSkript Sandbox](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_ec2_instance_age/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_ec2_instance_age/aws_get_ec2_instance_age.json
================================================
{
"action_title": "Get Age of all EC2 Instances in Days",
"action_description": "Get Age of all EC2 Instances in Days",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_ec2_instance_age",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_get_ec2_instance_age/aws_get_ec2_instance_age.py
================================================
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
## written by Doug Sillars with the aid of ChatGPT
##read the blog https://unskript.com/will-ai-replace-us-using-chatgpt-to-create-python-actions-for-unskript/
##
import pprint
from typing import Dict
from datetime import datetime, timezone
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from beartype import beartype
@beartype
def aws_get_ec2_instance_age_printer(output):
    """Pretty-print the per-instance age-in-days map."""
    if output is not None:
        pprint.pprint(output)
class InputSchema(BaseModel):
    # Region whose EC2 instances are inspected for launch age.
    # Fixed copy-paste error: this action inspects EC2 instances, not ECS.
    region: str = Field(
        title='Region',
        description='AWS Region of the EC2 instances')
@beartype
def aws_get_ec2_instance_age(handle, region: str) -> Dict:
    """Return the age in days of every EC2 instance in the region, keyed by instance ID.

    Also prints a one-line summary per instance, matching the original behavior.
    """
    ec2Client = handle.client('ec2', region_name=region)
    reservations = aws_get_paginator(ec2Client, "describe_instances", "Reservations")
    # LaunchTime is timezone-aware, so subtract from an aware "now".
    current_time = datetime.now(timezone.utc)
    result = {}
    for reservation in reservations:
        for instance in reservation['Instances']:
            instance_id = instance['InstanceId']
            age = current_time - instance['LaunchTime']
            ageText = f"Instance {instance_id} is {age.days} days old"
            print(ageText)
            result[instance_id] = age.days
    return result
================================================
FILE: AWS/legos/aws_get_ec2_instances_with_smaller_cpu_size/README.md
================================================
# Get AWS EC2 with smaller CPU size
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_ec2_instances_with_smaller_cpu_size/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_ec2_instances_with_smaller_cpu_size/aws_get_ec2_instances_with_smaller_cpu_size.json
================================================
{
"action_title": "Get AWS EC2 with smaller CPU size",
"action_description": "This action finds EC2 instances with smaller CPU size than threshold. (vCPU count)",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_ec2_instances_with_smaller_cpu_size",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check":true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_get_ec2_instances_with_smaller_cpu_size/aws_get_ec2_instances_with_smaller_cpu_size.py
================================================
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import List, Optional
from pydantic import BaseModel, Field
import pprint
import json
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.legos.aws.aws_get_all_ec2_instances.aws_get_all_ec2_instances import aws_get_all_ec2_instances
class InputSchema(BaseModel):
    # Fixed copy-paste error: this action targets EC2 instances, not RDS.
    region: Optional[str] = Field(
        '', description='AWS Region to get the EC2 Instance', title='AWS Region'
    )
    # Specific instance IDs to check; when empty, instances are discovered.
    instance_ids: Optional[List] = Field(
        '', description='List of instance IDs to check.', title='List of Instance IDs'
    )
    # vCPU-count threshold below which an instance is flagged.
    threshold: Optional[float] = Field(
        default= 2,
        description='The CPU size threshold. Default value is 2.0. Size map is as follows-\n "nano": 2,\n "micro": 2,\n "small": 1,\n "medium": 1,\n "large": 2,\n "xlarge": 4,\n "2xlarge": 8,\n "3xlarge": 12,\n "4xlarge": 16,\n "6xlarge": 24,\n "8xlarge": 32,\n "9xlarge": 36,\n "10xlarge": 40,\n "12xlarge": 48,\n "16xlarge": 64,\n "18xlarge": 72,\n "24xlarge": 96,\n "32xlarge": 128,\n "metal": 96',
        title='Threshold (vCPU)',
    )
def aws_get_ec2_instances_with_smaller_cpu_size_printer(output):
    """Pretty-print the small-CPU instance check result."""
    if output is not None:
        pprint.pprint(output)
def aws_get_ec2_instances_with_smaller_cpu_size(handle, instance_ids: list = [], region: str = "", threshold: float = 2.0):
    """Check the CPU size (vCPU count) and compare with the threshold.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type instance_ids: list
    :param instance_ids: List of instance IDs to check. (Read-only here, so the
        mutable default is safe; kept for interface compatibility.)
    :type region: str
    :param region: Region to get instances from.
    :type threshold: float
    :param threshold: The CPU size threshold. Example value is 2.0.
    :rtype: Status, List of dicts of instance IDs with CPU size less than the threshold
    """
    # vCPU count per instance-size suffix (the part after the '.' in e.g.
    # 't2.micro'). Was a JSON string parsed at every call; a dict literal is
    # equivalent and avoids the json round-trip.
    size_to_cpu_map = {
        "nano": 2,
        "micro": 2,
        "small": 1,
        "medium": 1,
        "large": 2,
        "xlarge": 4,
        "2xlarge": 8,
        "3xlarge": 12,
        "4xlarge": 16,
        "6xlarge": 24,
        "8xlarge": 32,
        "9xlarge": 36,
        "10xlarge": 40,
        "12xlarge": 48,
        "16xlarge": 64,
        "18xlarge": 72,
        "24xlarge": 96,
        "32xlarge": 128,
        "metal": 96
    }
    result = []
    if instance_ids and not region:
        raise ValueError("Region must be specified when instance IDs are given.")
    if instance_ids and region:
        # Specific instances in a specific region.
        all_instance_ids = [{region: instance_ids}]
    elif region:
        # All instances in one region.
        all_instance_ids = [{region: aws_get_all_ec2_instances(handle, region)}]
    else:
        # Neither given: discover instances across every region, best-effort.
        all_instance_ids = []
        for reg in aws_list_all_regions(handle):
            try:
                all_instance_ids.append({reg: aws_get_all_ec2_instances(handle, reg)})
            except Exception:
                pass
    for region_instances in all_instance_ids:
        for selected_region, inst_ids in region_instances.items():
            ec2 = handle.client('ec2', region_name=selected_region)
            for instance_id in inst_ids:
                resp = ec2.describe_instances(InstanceIds=[instance_id])
                instance_type = resp['Reservations'][0]['Instances'][0]['InstanceType']
                instance_size = instance_type.split('.')[1]
                # Unknown sizes map to 0 vCPUs and are therefore always flagged.
                cpu_size = size_to_cpu_map.get(instance_size, 0)
                if cpu_size < threshold:
                    # Bug fix: the original guarded this append with
                    # `if region not in instances_with_low_cpu_size`, comparing the
                    # *region name* against the dict keys ("region"/"instance_id"),
                    # and reused a single dict object. Build a fresh record per
                    # instance instead.
                    result.append({"region": selected_region, "instance_id": instance_id})
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_ecs_instances_without_autoscaling/README.md
================================================
# AWS ECS Instances without AutoScaling policy
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_ecs_instances_without_autoscaling/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_ecs_instances_without_autoscaling/aws_get_ecs_instances_without_autoscaling.json
================================================
{
"action_title": "AWS ECS Instances without AutoScaling policy",
"action_description": "AWS ECS Instances without AutoScaling policy.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_ecs_instances_without_autoscaling",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check":true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_ECS", "CATEGORY_TYPE_SECOPS"],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_get_ecs_instances_without_autoscaling/aws_get_ecs_instances_without_autoscaling.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    # Optional AWS region; when empty, every region is scanned.
    region: Optional[str] = Field(
        default="",
        title='AWS Region',
        description='AWS Region.')
def aws_get_ecs_instances_without_autoscaling_printer(output):
    """Pretty-print the ECS-instances-without-ASG check result."""
    if output is not None:
        pprint.pprint(output)
def aws_get_ecs_instances_without_autoscaling(handle, region: str = "") -> Tuple:
    """aws_get_ecs_instances_without_autoscaling returns ECS container instances
    that are not covered by an Auto Scaling group.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: AWS Region. When empty, all regions are scanned.
    :rtype: Tuple of (status, list of {"instance_id", "cluster", "region"} dicts or None).
    """
    if not handle or (region and region not in aws_list_all_regions(handle)):
        raise ValueError("Invalid input parameters provided.")
    result = []
    all_regions = [region] if region else aws_list_all_regions(handle)
    # Note: the original re-validated each region against aws_list_all_regions
    # inside the loop — an extra API call per region that could never fire,
    # since every region here came from that same list (or was validated above).
    for reg in all_regions:
        try:
            ecs_client = handle.client('ecs', region_name=reg)
            autoscaling_client = handle.client('autoscaling', region_name=reg)
            cluster_arns = aws_get_paginator(ecs_client, "list_clusters", "clusterArns")
            cluster_names = [arn.split('/')[-1] for arn in cluster_arns]
            for cluster in cluster_names:
                container_arns = aws_get_paginator(ecs_client, "list_container_instances",
                                                   "containerInstanceArns", cluster=cluster)
                if not container_arns:
                    continue
                container_instances_data = ecs_client.describe_container_instances(
                    cluster=cluster,
                    containerInstances=container_arns
                )
                for ec2_instance in container_instances_data['containerInstances']:
                    instance_id = ec2_instance['ec2InstanceId']
                    asg_instances = autoscaling_client.describe_auto_scaling_instances(
                        InstanceIds=[instance_id]
                    )['AutoScalingInstances']
                    # An instance is "covered" only when it belongs to an ASG
                    # and that ASG can actually be described.
                    covered = False
                    if asg_instances:
                        asg_name = asg_instances[0]['AutoScalingGroupName']
                        asg_response = autoscaling_client.describe_auto_scaling_groups(
                            AutoScalingGroupNames=[asg_name]
                        )
                        covered = bool(asg_response['AutoScalingGroups'])
                    if not covered:
                        # Single append replaces the two duplicated branches
                        # of the original if/else.
                        result.append({
                            "instance_id": instance_id,
                            "cluster": cluster,
                            "region": reg,
                        })
        except Exception:
            # Best-effort scan: skip regions/clusters that cannot be queried.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_ecs_services_status/README.md
================================================
# Get AWS ECS Service Status
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_ecs_services_status/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: AWS/legos/aws_get_ecs_services_status/aws_get_ecs_services_status.json
================================================
{
"action_title": "Get AWS ECS Service Status",
"action_description": "Get the Status of an AWS ECS Service",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_ecs_services_status",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS" ]
}
================================================
FILE: AWS/legos/aws_get_ecs_services_status/aws_get_ecs_services_status.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
# Input schema for the aws_get_ecs_services_status action.
class InputSchema(BaseModel):
    # AWS region in which the ECS clusters/services are queried.
    region: str = Field(
        title='Region',
        description='AWS Region of the ECS service.')
def aws_get_ecs_services_status_printer(output):
    """Pretty-print the service-status map; does nothing when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_get_ecs_services_status(handle, region: str) -> Dict:
    """aws_get_ecs_services_status returns the status of all ECS services.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region of the ECS service.

    :rtype: Dict mapping each service name to its status.
    """
    ecs_client = handle.client('ecs', region_name=region)
    output = {}
    # Paginate both listings: the bare list_clusters / list_services calls
    # truncate their results (100 clusters / 10 services per call).
    cluster_paginator = ecs_client.get_paginator('list_clusters')
    for cluster_page in cluster_paginator.paginate():
        for cluster_arn in cluster_page['clusterArns']:
            service_arns = []
            service_paginator = ecs_client.get_paginator('list_services')
            for service_page in service_paginator.paginate(cluster=cluster_arn):
                service_arns.extend(service_page['serviceArns'])
            # describe_services accepts at most 10 services per request.
            for start in range(0, len(service_arns), 10):
                described = ecs_client.describe_services(
                    cluster=cluster_arn, services=service_arns[start:start + 10])
                for service in described['services']:
                    output[service['serviceName']] = service['status']
    return output
================================================
FILE: AWS/legos/aws_get_ecs_services_without_autoscaling/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_ecs_services_without_autoscaling/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_ecs_services_without_autoscaling/aws_get_ecs_services_without_autoscaling.json
================================================
{
"action_title": "AWS ECS Services without AutoScaling policy",
"action_description": "AWS ECS Services without AutoScaling policy.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_ecs_services_without_autoscaling",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check":true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_ECS", "CATEGORY_TYPE_SECOPS"],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_get_ecs_services_without_autoscaling/aws_get_ecs_services_without_autoscaling.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
# Input schema for the aws_get_ecs_services_without_autoscaling check.
class InputSchema(BaseModel):
    # Optional region; when empty, every enabled region is scanned.
    region: Optional[str] = Field(
        default="",
        title='AWS Region',
        description='AWS Region.')
def aws_get_ecs_services_without_autoscaling_printer(output):
    """Pretty-print the check result; does nothing when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_get_ecs_services_without_autoscaling(handle, region: str = "") -> Tuple:
    """aws_get_ecs_services_without_autoscaling returns ECS services that have
    no Application Auto Scaling policy attached.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region. When empty, all regions are scanned.

    :rtype: Tuple of (status, list of {service, cluster, region} dicts or None).
    """
    result = []
    all_regions = [region] if region else aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            ecs_client = handle.client('ecs', region_name=reg)
            autoscaling_client = handle.client('application-autoscaling', region_name=reg)
            cluster_arns = aws_get_paginator(ecs_client, "list_clusters", "clusterArns")
            cluster_names = [arn.split('/')[-1] for arn in cluster_arns]
            for cluster in cluster_names:
                service_arns = aws_get_paginator(ecs_client, "list_services",
                                                 "serviceArns", cluster=cluster)
                for service_arn in service_arns:
                    # Application Auto Scaling identifies an ECS service by
                    # "service/<cluster-name>/<service-name>", NOT by its ARN.
                    # Passing the ARN never matches, so every service used to
                    # be reported as missing a policy.
                    service_name = service_arn.split('/')[-1]
                    resource_id = f"service/{cluster}/{service_name}"
                    policies = autoscaling_client.describe_scaling_policies(
                        ServiceNamespace='ecs', ResourceId=resource_id)
                    if not policies['ScalingPolicies']:
                        # Keep the ARN in the output for backward compatibility.
                        result.append({
                            "service": service_arn,
                            "cluster": cluster,
                            "region": reg,
                        })
        except Exception:
            # Best effort: skip regions where ECS/Auto Scaling is unreachable.
            pass
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_generated_policy/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_generated_policy/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_generated_policy/aws_get_generated_policy.json
================================================
{
"action_title": "AWS Get Generated Policy",
"action_description": "Given a Region and the ID of a policy generation job, this Action will return the policy (once it has been completed).",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_generated_policy",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_COST_OPT","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_IAM"]
}
================================================
FILE: AWS/legos/aws_get_generated_policy/aws_get_generated_policy.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from __future__ import annotations
import pprint
from typing import Dict
from pydantic import BaseModel
# This action takes no user-supplied inputs (region and jobId come from the
# entry function's parameters at runtime).
class InputSchema(BaseModel):
    pass
def aws_get_generated_policy_printer(output):
    """Pretty-print the generated-policy result; does nothing when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_get_generated_policy(handle, region:str,jobId:str) -> Dict:
    """aws_get_generated_policy fetches the result of an IAM Access Analyzer
    policy-generation job.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region of the Access Analyzer job.

    :type jobId: string
    :param jobId: ID of the policy generation job.

    :rtype: Dict with 'generatedPolicyResult' and 'generationStatus' keys.
    """
    analyzer_client = handle.client('accessanalyzer', region_name=region)
    response = analyzer_client.get_generated_policy(
        jobId=jobId,
        includeResourcePlaceholders=True,
        includeServiceLevelTemplate=True
    )
    return {
        'generatedPolicyResult': response['generatedPolicyResult'],
        'generationStatus': response['jobDetails']['status'],
    }
================================================
FILE: AWS/legos/aws_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_handle/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_handle/aws_get_handle.json
================================================
{
"action_title": "Get AWS boto3 handle",
"action_description": "Get AWS boto3 handle",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false,
"action_categories": [ "CATEGORY_TYPE_AWS" ]
}
================================================
FILE: AWS/legos/aws_get_handle/aws_get_handle.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel
from unskript.connectors.aws import Session
# This action takes no user-supplied inputs.
class InputSchema(BaseModel):
    pass
def aws_get_handle(handle: Session):
    """aws_get_handle returns the AWS session handle.

    :type handle: Session
    :param handle: AWS session object produced by task.validate(...); returned
        unchanged so downstream cells can reuse the authenticated session.

    :rtype: AWS handle.
    """
    return handle
================================================
FILE: AWS/legos/aws_get_iam_users_without_attached_policies/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_iam_users_without_attached_policies/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_iam_users_without_attached_policies/aws_get_iam_users_without_attached_policies.json
================================================
{
"action_title": "AWS list IAM users without attached policies",
"action_description": "Get a list of all IAM users that do not have any user-managed or AWS-managed policies attached to them",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_iam_users_without_attached_policies",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_IAM"],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_get_iam_users_without_attached_policies/aws_get_iam_users_without_attached_policies.py
================================================
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Tuple
from unskript.legos.aws.aws_list_all_iam_users.aws_list_all_iam_users import aws_list_all_iam_users
from pydantic import BaseModel, Field
# This check takes no user-supplied inputs.
class InputSchema(BaseModel):
    pass
def aws_get_iam_users_without_attached_policies_printer(output):
    """Print a human-readable summary of the (status, users) check output."""
    if output is None:
        return
    status, users = output
    if not status:
        print("IAM users that do not have any user-managed or AWS-managed policies attached to them: ", users)
    else:
        print("There are no IAM users that do not have any user-managed or AWS-managed policies attached to them")
def aws_get_iam_users_without_attached_policies(handle) -> Tuple:
    """aws_get_iam_users_without_attached_policies lists IAM users that have
    neither inline (user-managed) nor attached (AWS-managed) policies.

    :type handle: object
    :param handle: Object returned from Task Validate

    :rtype: Tuple of (status, list of user names or None).
    """
    users_without_policies = []
    iam_client = handle.client('iam')
    for page in iam_client.get_paginator('list_users').paginate():
        for user_record in page['Users']:
            name = user_record['UserName']
            try:
                # Inline policies embedded in the user.
                inline = iam_client.list_user_policies(UserName=name)['PolicyNames']
                # Managed policies attached to the user.
                managed = iam_client.list_attached_user_policies(UserName=name)['AttachedPolicies']
                if not inline and not managed:
                    users_without_policies.append(name)
            except Exception as e:
                print(f"An error occurred while processing user {name}: {e}")
    if users_without_policies:
        return (False, users_without_policies)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_idle_emr_clusters/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_idle_emr_clusters/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_idle_emr_clusters/aws_get_idle_emr_clusters.json
================================================
{
"action_title": "AWS Get Idle EMR Clusters",
"action_description": "This action list of EMR clusters that have been idle for more than the specified time.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_idle_emr_clusters",
"action_needs_credential": true,
"action_is_check": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS"],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_get_idle_emr_clusters/aws_get_idle_emr_clusters.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from datetime import datetime, timedelta, timezone
from typing import Optional, Tuple

from pydantic import BaseModel, Field

from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
# Input schema for the aws_get_idle_emr_clusters check.
class InputSchema(BaseModel):
    # Optional region; when empty, every enabled region is scanned.
    region: Optional[str] = Field(
        default='',
        title='AWS Region',
        description='AWS Region.'
    )
    # Clusters ready for longer than this many minutes are treated as idle.
    max_idle_time: Optional[int] = Field(
        default=30,
        title='Max Idle Time (minutes)',
        description='The maximum idle time in minutes.'
    )
def aws_get_idle_emr_clusters_printer(output):
    """Pretty-print the idle-cluster check result; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_get_idle_emr_clusters(handle, max_idle_time: int = 30, region: str = "") -> Tuple:
    """aws_get_idle_emr_clusters Gets list of idle EMR clusters.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type max_idle_time: int
    :param max_idle_time: (minutes) The maximum idle time in minutes.

    :type region: string
    :param region: AWS Region. When empty, all regions are scanned.

    :rtype: Tuple of (status, list of idle EMR clusters or None).
    """
    result = []
    all_regions = [region] if region else aws_list_all_regions(handle)
    # EMR's ReadyDateTime is timezone-aware (UTC). The cutoff must be aware
    # too, otherwise the comparison raises TypeError — which the except below
    # used to swallow, making the check always report "no idle clusters".
    cutoff = datetime.now(timezone.utc) - timedelta(minutes=max_idle_time)
    for reg in all_regions:
        try:
            emr_client = handle.client('emr', region_name=reg)
            clusters = aws_get_paginator(emr_client, "list_clusters", "Clusters")
            for cluster in clusters:
                ready_time = cluster.get('Status', {}).get('Timeline', {}).get('ReadyDateTime')
                if ready_time is not None and ready_time < cutoff:
                    result.append({
                        "cluster_id": cluster['Id'],
                        "region": reg
                    })
        except Exception:
            # Best effort: skip regions where EMR is not reachable.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_instance_detail_with_private_dns_name/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_instance_detail_with_private_dns_name/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_instance_detail_with_private_dns_name/aws_get_instance_detail_with_private_dns_name.json
================================================
{
"action_title": "Get AWS Instance Details with Matching Private DNS Name",
"action_description": "Use this action to get details of an AWS EC2 Instance that matches a Private DNS Name",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_instance_detail_with_private_dns_name",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_get_instance_detail_with_private_dns_name/aws_get_instance_detail_with_private_dns_name.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
# Input schema for the aws_get_instance_detail_with_private_dns_name action.
class InputSchema(BaseModel):
    # Private DNS name used as the describe_instances filter value.
    dns_name: str = Field(
        title='Private DNS Name',
        description='Private DNS Name.')
    # AWS region of the EC2 instance.
    region: str = Field(
        title='Region',
        description='AWS Region of the resource.')
def aws_get_instance_detail_with_private_dns_name_printer(output):
    """Pretty-print the matched instances; does nothing when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_get_instance_detail_with_private_dns_name(
        handle,
        dns_name: str,
        region: str) -> List:
    """aws_get_instance_detail_with_private_dns_name returns the EC2 instances
    whose private DNS name matches the given value.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type dns_name: string
    :param dns_name: Private DNS Name.

    :type region: string
    :param region: AWS Region of the resource.

    :rtype: List of matching instance dicts.
    """
    ec2_client = handle.client('ec2', region_name=region)
    dns_filter = [{"Name": 'private-dns-name', "Values": [dns_name]}]
    reservations = aws_get_paginator(ec2_client, "describe_instances", "Reservations",
                                     Filters=dns_filter)
    return [instance
            for reservation in reservations
            for instance in reservation['Instances']]
================================================
FILE: AWS/legos/aws_get_instance_details/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_instance_details/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_instance_details/aws_get_instance_details.json
================================================
{
"action_title": "Get AWS Instances Details",
"action_description": "Get AWS Instances Details",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_instance_details",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_get_instance_details/aws_get_instance_details.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from beartype import beartype
# Input schema for the aws_get_instance_details action.
class InputSchema(BaseModel):
    # EC2 instance id to describe (e.g. "i-0123456789abcdef0").
    instance_id: str = Field(
        title='Instance Id',
        description='ID of the instance.')
    # AWS region of the instance.
    region: str = Field(
        title='Region',
        description='AWS Region of the instance.')
def aws_get_instances_printer(output):
    """Pretty-print the instance details; does nothing when output is None."""
    if output is not None:
        pprint.pprint(output)
@beartype
def aws_get_instance_details(handle, instance_id: str, region: str) -> Dict:
    """aws_get_instance_details returns the details of a single EC2 instance.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.

    :type instance_id: string
    :param instance_id: ID of the instance to look up.

    :type region: string
    :param region: AWS Region of the instance.

    :rtype: Dict with the instance details.

    :raises ValueError: If no instance matches the given id in the region.
    """
    ec2client = handle.client('ec2', region_name=region)
    response = ec2client.describe_instances(
        Filters=[{"Name": "instance-id", "Values": [instance_id]}])
    for reservation in response["Reservations"]:
        for instance in reservation["Instances"]:
            # The instance-id filter can match at most one instance; return it.
            return instance
    # Fail with a clear message instead of the opaque IndexError the previous
    # `instances[0]` raised when nothing matched.
    raise ValueError(f"No instance found with id '{instance_id}' in region '{region}'")
================================================
FILE: AWS/legos/aws_get_instances/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_instances/aws_get_instances.json
================================================
{
"action_title": "List All AWS EC2 Instances Under the ELB",
"action_description": " Get a list of all AWS EC2 Instances from given ELB",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_get_instances/aws_get_instances.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
## @author: Yugal Pachpande, @email: yugal.pachpande@unskript.com
##
from typing import List
from pydantic import BaseModel, Field
import pandas as pd
# Input schema for the aws_get_instances (ELB instance listing) action.
class InputSchema(BaseModel):
    # Name of the classic Elastic Load Balancer to inspect.
    elb_name: str = Field(
        title='Elastic Load Balancer Name',
        description='Name of the Elastic Load Balancer Name')
    # AWS region of the load balancer.
    region: str = Field(
        title='Region',
        description='AWS Region of the ECS service')
def aws_get_instances_printer(output):
    """Render the instance list as an untruncated pandas table."""
    if output is None:
        return
    display_options = {
        'display.max_rows': None,
        'display.max_columns': None,
        'display.width': None,
        'display.max_colwidth': None,
    }
    # Lift every display limit so the whole table is printed.
    for option, value in display_options.items():
        pd.set_option(option, value)
    print("\n")
    print(pd.DataFrame(output))
def aws_get_instances(handle, elb_name: str, region: str) -> List:
    """aws_get_instances returns the health states of all EC2 instances
    registered with the given classic ELB.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type elb_name: string
    :param elb_name: Name of the Elastic Load Balancer Name

    :type region: string
    :param region: AWS Region of the ECS service.

    :rtype: list of dict with all AWS EC2 Instances from given ELB
    """
    elb_client = handle.client('elb', region_name=region)
    health = elb_client.describe_instance_health(
        LoadBalancerName=elb_name,
    )
    return list(health['InstanceStates'])
================================================
FILE: AWS/legos/aws_get_internet_gateway_by_vpc/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_internet_gateway_by_vpc/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_internet_gateway_by_vpc/aws_get_internet_gateway_by_vpc.json
================================================
{
"action_title": "AWS Get Internet Gateway by VPC ID",
"action_description": "AWS Get Internet Gateway by VPC ID",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_internet_gateway_by_vpc",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_VPC" ]
}
================================================
FILE: AWS/legos/aws_get_internet_gateway_by_vpc/aws_get_internet_gateway_by_vpc.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
# Input schema for the aws_get_internet_gateway_by_vpc action.
class InputSchema(BaseModel):
    # VPC id whose attached internet gateways are looked up.
    vpc_id: str = Field(
        title='VPC ID',
        description='VPC ID of the Instance.')
    # AWS region of the VPC.
    region: str = Field(
        title='Region',
        description='AWS Region.')
def aws_get_internet_gateway_by_vpc_printer(output):
    """Pretty-print the gateway id list; does nothing when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_get_internet_gateway_by_vpc(handle, vpc_id: str, region: str) -> List:
    """aws_get_internet_gateway_by_vpc returns the Internet Gateway IDs
    attached to the given VPC.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type vpc_id: str
    :param vpc_id: VPC ID to find Internet Gateway.

    :type region: str
    :param region: Region to filter instance.

    :rtype: List of Internet Gateway IDs (a single {"error": ...} dict is
            appended when the lookup fails).
    """
    ec2_client = handle.client('ec2', region_name=region)
    gateway_ids = []
    try:
        attachment_filter = [{'Name': 'attachment.vpc-id', 'Values': [vpc_id]}]
        gateways = aws_get_paginator(ec2_client, "describe_internet_gateways",
                                     "InternetGateways", Filters=attachment_filter)
        for gateway in gateways:
            if "InternetGatewayId" in gateway:
                gateway_ids.append(gateway["InternetGatewayId"])
    except Exception as error:
        gateway_ids.append({"error": error})
    return gateway_ids
================================================
FILE: AWS/legos/aws_get_lambdas_not_using_arm_graviton2_processor/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_lambdas_not_using_arm_graviton2_processor/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_lambdas_not_using_arm_graviton2_processor/aws_get_lambdas_not_using_arm_graviton2_processor.json
================================================
{
"action_title": "Find AWS Lambdas Not Using ARM64 Graviton2 Processor",
"action_description": "Find all AWS Lambda functions that are not using the Arm-based AWS Graviton2 processor for their runtime architecture",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_lambdas_not_using_arm_graviton2_processor",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_COST_OPT" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_LAMBDA"],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_get_lambdas_not_using_arm_graviton2_processor/aws_get_lambdas_not_using_arm_graviton2_processor.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
from unskript.legos.aws.aws_execute_cli_command.aws_execute_cli_command import aws_execute_cli_command
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.connectors.aws import aws_get_paginator
import pprint
# Input schema for the aws_get_lambdas_not_using_arm_graviton2_processor check.
class InputSchema(BaseModel):
    # Optional region; when empty, every enabled region is scanned.
    region: Optional[str] = Field(
        '',
        description='AWS region. Eg: "us-west-2"',
        title='Region'
    )
def aws_get_lambdas_not_using_arm_graviton2_processor_printer(output):
    """Pretty-print the check result; does nothing when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_get_lambdas_not_using_arm_graviton2_processor(handle, region: str = "") -> Tuple:
    """aws_get_lambdas_not_using_arm_graviton2_processor finds Lambda functions
    whose runtime architecture does not include arm64 (Graviton2).

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region to get the instances from. Eg: "us-west-2"

    :rtype: Tuple with status of result and list of Lambda functions that don't use the arm-based graviton2 processor
    """
    non_arm_functions = []
    regions_to_check = [region] if region else aws_list_all_regions(handle)
    for current_region in regions_to_check:
        try:
            lambda_client = handle.client('lambda', region_name=current_region)
            functions = aws_get_paginator(lambda_client, "list_functions", "Functions")
            for function_info in functions:
                name = function_info.get('FunctionName', "")
                arch_list = function_info.get('Architectures', [])
                if name and 'arm64' not in arch_list:
                    non_arm_functions.append({"function_name": name,
                                              "region": current_region})
        except Exception:
            # Best effort: ignore regions where Lambda cannot be queried.
            pass
    if non_arm_functions:
        return (False, non_arm_functions)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_lambdas_with_high_error_rate/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_lambdas_with_high_error_rate/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_lambdas_with_high_error_rate/aws_get_lambdas_with_high_error_rate.json
================================================
{
"action_title": "Get AWS Lambdas With High Error Rate",
"action_description": "Get AWS Lambda Functions that exceed a given threshold error rate.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_lambdas_with_high_error_rate",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_COST_OPT","CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ROUTE53"]
}
================================================
FILE: AWS/legos/aws_get_lambdas_with_high_error_rate/aws_get_lambdas_with_high_error_rate.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Tuple, Optional
import datetime
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
# Input schema for the aws_get_lambdas_with_high_error_rate check.
class InputSchema(BaseModel):
    # Error rate expressed as a fraction (0.1 == 10%).
    error_rate_threshold: Optional[float] = Field(
        0.1,
        description='Error rate threshold value. Eg: 0.1 (i.e. 10%)',
        title='Error Rate Threshold',
    )
    # Look-back window, in days, for the CloudWatch metrics.
    days_back: Optional[int] = Field(
        7,
        description=('Number of days to go back. Default value ids 7 days. '
                     'Eg: 7 (This checks for functions with high error rate in the last 7 days)'),
        title='Days Back',
    )
    # Optional region; when empty, every enabled region is scanned.
    region: Optional[str] = Field(
        '',
        description='AWS region. Eg: "us-west-2"',
        title='Region'
    )
def aws_get_lambdas_with_high_error_rate_printer(output):
    """Pretty-print the check result; does nothing when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_get_lambdas_with_high_error_rate(
    handle,
    error_rate_threshold:float=0.1,
    days_back:int=7,
    region:str=""
    ) -> Tuple:
    """aws_get_lambdas_with_high_error_rate finds AWS Lambda functions whose
    error rate (errors / invocations) over the look-back window exceeds the
    given threshold.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type error_rate_threshold: float
    :param error_rate_threshold: Error-rate threshold as a fraction. Eg 0.1 == 10%.

    :type days_back: int
    :param days_back: Number of days of CloudWatch history to examine.

    :type region: string
    :param region: AWS Region to get the instances from. Eg: "us-west-2"

    :rtype: Tuple with status result and list of Lambda functions with high error rate
    """
    if not handle or (region and region not in aws_list_all_regions(handle)):
        raise ValueError("Invalid input parameters provided.")

    def _metric_sum(cloudwatch_client, metric_name, function_name, start_time, end_time):
        # Sum a per-function AWS/Lambda metric over every returned datapoint
        # (the old code only looked at Datapoints[0], undercounting errors).
        response = cloudwatch_client.get_metric_statistics(
            Namespace='AWS/Lambda',
            MetricName=metric_name,
            Dimensions=[
                {
                    'Name': 'FunctionName',
                    'Value': function_name
                },
            ],
            StartTime=start_time,
            EndTime=end_time,
            Period=3600,
            Statistics=['Sum']
        )
        return sum(dp['Sum'] for dp in response.get('Datapoints', []) if 'Sum' in dp)

    result = []
    all_regions = [region] if region else aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            lambdaClient = handle.client('lambda', region_name=reg)
            cloudwatchClient = handle.client('cloudwatch', region_name=reg)
            end_time = datetime.datetime.now()
            start_time = end_time - datetime.timedelta(days=int(days_back))
            # Paginate so accounts with more than 50 functions are covered.
            paginator = lambdaClient.get_paginator('list_functions')
            for page in paginator.paginate():
                for function in page['Functions']:
                    function_name = function['FunctionName']
                    errors = _metric_sum(cloudwatchClient, 'Errors',
                                         function_name, start_time, end_time)
                    # Invocations must come from CloudWatch: the old code read
                    # 'NumberOfInvocations' from get_function_configuration(),
                    # a field that does not exist, so the count was always 0
                    # and no function was ever flagged.
                    invocations = _metric_sum(cloudwatchClient, 'Invocations',
                                              function_name, start_time, end_time)
                    if invocations > 0 and (errors / invocations) > error_rate_threshold:
                        result.append({'function_name': function_name, 'region': reg})
        except Exception:
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_long_running_elasticcache_clusters_without_reserved_nodes/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_long_running_elasticcache_clusters_without_reserved_nodes/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_long_running_elasticcache_clusters_without_reserved_nodes/aws_get_long_running_elasticcache_clusters_without_reserved_nodes.json
================================================
{
"action_title": "AWS Get Long Running ElastiCache clusters Without Reserved Nodes",
"action_description": "This action gets information about long running ElastiCache clusters and their status, and checks if they have any reserved nodes associated with them.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_long_running_elasticcache_clusters_without_reserved_nodes",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELASTICACHE"],
"action_next_hop": [],
"action_next_hop_parameter_mapping":{"51a0b15d932dddeea9b1991fb6299577756408ff7c47acc5dec3eb114e33562b": {"name": "Purchase Reserved Nodes For Long Running AWS ElastiCache Clusters", "region": ".[0].region"}}
}
================================================
FILE: AWS/legos/aws_get_long_running_elasticcache_clusters_without_reserved_nodes/aws_get_long_running_elasticcache_clusters_without_reserved_nodes.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from datetime import datetime, timedelta, timezone
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    """Input parameters for the long-running-ElastiCache-clusters check."""
    # Region to scan; an empty string means "scan every available region".
    region: Optional[str] = Field(
        '', description='AWS Region to get the ElasticCache Cluster', title='AWS Region'
    )
    # Minimum age (in days) for a cluster to be considered "long running".
    threshold: Optional[float] = Field(
        10,
        description='Threshold(in days) to find long running ElasticCache clusters. Eg: 30, This will find all the clusters that have been created a month ago.',
        title='Threshold(in days)',
    )
def aws_get_long_running_elasticcache_clusters_without_reserved_nodes_printer(output):
    """Pretty-print the check result; a None output is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_get_long_running_elasticcache_clusters_without_reserved_nodes(handle, region: str = "", threshold:int = 10) -> Tuple:
"""aws_get_long_running_elasticcache_clusters_without_reserved_nodes finds ElasticCache Clusters that are long running and have no reserved nodes
:type handle: object
:param handle: Object returned from task.validate(...).
:type region: string
:param region: Region of the Cluster.
:type threshold: integer
:param threshold: Threshold(in days) to find long running ElasticCache clusters. Eg: 30, This will find all the clusters that have been created a month ago.
:rtype: status, list of clusters, nodetype and their region.
"""
result = []
reservedNodesPerRegion = {}
all_regions = [region]
if not region:
all_regions = aws_list_all_regions(handle)
# Get the list of reserved node per region per type. We just need to maintain
# what type of reserved nodes are present per region. So, reservedNodesPerRegion
# would be like:
#
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_long_running_rds_instances_without_reserved_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_long_running_rds_instances_without_reserved_instances/aws_get_long_running_rds_instances_without_reserved_instances.json
================================================
{
"action_title": "AWS Get Long Running RDS Instances Without Reserved Instances",
"action_description": "This action gets information about long running instances and their status, and checks if they have any reserved nodes associated with them.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_long_running_rds_instances_without_reserved_instances",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_COST_OPT","CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS"],
"action_next_hop": ["e0ff270a41b65b1804da257ffec5fbdec7dd51bdb3da925cced7fa3391bfe70b"],
"action_next_hop_parameter_mapping":{"e0ff270a41b65b1804da257ffec5fbdec7dd51bdb3da925cced7fa3391bfe70b": {"name": "Purchase Reserved Instances For Long Running AWS RDS Instances", "region": ".[0].region"}}
}
================================================
FILE: AWS/legos/aws_get_long_running_rds_instances_without_reserved_instances/aws_get_long_running_rds_instances_without_reserved_instances.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from tabulate import tabulate
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from datetime import datetime,timedelta, timezone
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    """Input parameters for the long-running-RDS-instances check."""
    # Region to scan; an empty string means "scan every available region".
    region: Optional[str] = Field('', description='AWS Region.', title='AWS Region')
    # Minimum age (in days) for an instance to count as "long running".
    threshold: Optional[float] = Field(
        10,
        description='Threshold(in days) to find long running RDS instances. Eg: 30, This will find all the instances that have been created a month ago.',
        title='Threshold(in days)',
    )
def aws_get_long_running_rds_instances_without_reserved_instances_printer(output):
    """Render the check result as a human-readable grid table."""
    if output is None:
        print("Output is None.")
        return
    status, res = output
    if status:
        print("There are no DB instances that have been running for longer than the specified threshold and do not have corresponding reserved instances.")
        return
    print("DB instances that have been running for longer than the specified threshold and do not have corresponding reserved instances:")
    rows = []
    for item in res:
        rows.append([item['region'], item['instance_type'], item['instance']])
    print(tabulate(rows, headers=['Region', 'Instance Type', 'Instance'], tablefmt='grid'))
def aws_get_long_running_rds_instances_without_reserved_instances(handle, region: str = "", threshold: float = 10.0) -> Tuple:
    """aws_get_long_running_rds_instances_without_reserved_instances Gets all DB instances
    that have been running for longer than the specified threshold and do not have
    corresponding reserved instances.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region. An empty string scans every available region.

    :type threshold: float
    :param threshold: Threshold (in days) used to classify an instance as long running.

    :rtype: A tuple with a Status, and list of DB instances that don't have reserved instances
    """
    regions_to_scan = [region] if region else aws_list_all_regions(handle)

    # Pass 1: record which DB instance classes have reserved instances, per region.
    reserved_by_region = {}
    for reg in regions_to_scan:
        try:
            rds_client = handle.client('rds', region_name=reg)
            reserved = rds_client.describe_reserved_db_instances()
            classes = {}
            if 'ReservedDBInstances' in reserved:
                for reservation in reserved['ReservedDBInstances']:
                    classes[reservation['DBInstanceClass']] = True
            reserved_by_region[reg] = classes
        except Exception:
            # Best effort: regions that cannot be queried are skipped.
            pass

    # Pass 2: flag available instances older than the threshold whose class has
    # no matching reserved instance in the same region.
    offenders = []
    cutoff = timedelta(days=threshold)
    for reg in regions_to_scan:
        try:
            rds_client = handle.client('rds', region_name=reg)
            for instance in aws_get_paginator(rds_client, "describe_db_instances", "DBInstances"):
                if instance['DBInstanceStatus'] != 'available':
                    continue
                # Guard against records missing the keys used below.
                if 'InstanceCreateTime' not in instance or 'DBInstanceClass' not in instance:
                    continue
                uptime = datetime.now(timezone.utc) - instance['InstanceCreateTime']
                if uptime > cutoff and not reserved_by_region.get(reg, {}).get(instance['DBInstanceClass']):
                    offenders.append({
                        "region": reg,
                        "instance_type": instance['DBInstanceClass'],
                        "instance": instance['DBInstanceIdentifier'],
                    })
        except Exception:
            pass

    if offenders:
        return (False, offenders)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_long_running_redshift_clusters_without_reserved_nodes/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_long_running_redshift_clusters_without_reserved_nodes/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_long_running_redshift_clusters_without_reserved_nodes/aws_get_long_running_redshift_clusters_without_reserved_nodes.json
================================================
{
"action_title": "AWS Get Long Running Redshift Clusters Without Reserved Nodes",
"action_description": "This action gets information about running clusters and their status, and checks if they have any reserved nodes associated with them.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_long_running_redshift_clusters_without_reserved_nodes",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_COST_OPT","CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_REDSHIFT"],
"action_next_hop": ["08d3033e428c5fa241be26cfc8787fb16c05c6aa31830075e730fefd5aaf744f"],
"action_next_hop_parameter_mapping":{"08d3033e428c5fa241be26cfc8787fb16c05c6aa31830075e730fefd5aaf744f": {"name": "Purchase Reserved Nodes For Long Running AWS Redshift Clusters", "region": ".[0].region"}}
}
================================================
FILE: AWS/legos/aws_get_long_running_redshift_clusters_without_reserved_nodes/aws_get_long_running_redshift_clusters_without_reserved_nodes.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
import pprint
from datetime import datetime,timedelta, timezone
class InputSchema(BaseModel):
    """Input parameters for the long-running-Redshift-clusters check."""
    # Region to scan; an empty string means "scan every available region".
    region: Optional[str] = Field(
        '',
        description='AWS Region to get the Redshift Cluster',
        title='AWS Region'
    )
    # Minimum age (in days) for a cluster to be considered "long running".
    threshold: Optional[float] = Field(
        10,
        description='Threshold(in days) to find long running redshift clusters. Eg: 30, This will find all the clusters that have been created a month ago.',
        title='Threshold(in days)',
    )
def aws_get_long_running_redshift_clusters_without_reserved_nodes_printer(output):
    """Pretty-print the check result; a None output is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_get_long_running_redshift_clusters_without_reserved_nodes(handle, region: str = "", threshold:int = 10) -> Tuple:
    """aws_get_long_running_redshift_clusters_without_reserved_nodes finds Redshift Clusters
    that are long running and have no reserved nodes of a matching node type.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: Region of the Cluster. An empty string scans every available region.

    :type threshold: integer
    :param threshold: Threshold(in days) to find long running redshift clusters. Eg: 30, This will find all the clusters that have been created a month ago.

    :rtype: status, list of clusters, nodetype and their region.
    """
    if not handle or threshold < 0:
        raise ValueError("Invalid input parameters provided.")
    result = []
    reservedNodesPerRegion = {}
    all_regions = [region] if region else aws_list_all_regions(handle)

    # Pass 1: map region -> {node type: True} for every reserved node found.
    for reg in all_regions:
        try:
            redshiftClient = handle.client('redshift', region_name=reg)
            response = redshiftClient.describe_reserved_nodes()
            reservedNodesPerType = {}
            # .get() avoids a KeyError (previously aborting the whole region)
            # when the response carries no 'ReservedNodes' key.
            for node in response.get('ReservedNodes', []):
                reservedNodesPerType[node['NodeType']] = True
            reservedNodesPerRegion[reg] = reservedNodesPerType
        except Exception:
            # Best effort: regions that cannot be queried are skipped.
            pass

    # Pass 2: flag available clusters older than the threshold whose node type
    # has no reserved node in the same region.
    for reg in all_regions:
        try:
            redshiftClient = handle.client('redshift', region_name=reg)
            clusters = redshiftClient.describe_clusters()['Clusters']
            for cluster in clusters:
                # Guard per cluster so one malformed record no longer aborts
                # the remaining clusters of the region.
                if 'ClusterCreateTime' not in cluster:
                    continue
                cluster_age = datetime.now(timezone.utc) - cluster['ClusterCreateTime']
                if cluster['ClusterStatus'] == 'available' and cluster_age.days > threshold:
                    reservedNodes = reservedNodesPerRegion.get(reg, {})
                    if not reservedNodes.get(cluster['NodeType']):
                        cluster_dict = {
                            "region": reg,
                            "cluster": cluster['ClusterIdentifier'],
                            "node_type": cluster['NodeType']
                        }
                        result.append(cluster_dict)
        except Exception:
            # Unused 'as error' binding removed; failure in one region must not
            # stop the scan of the others.
            pass
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_nat_gateway_by_vpc/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_nat_gateway_by_vpc/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_nat_gateway_by_vpc/aws_get_nat_gateway_by_vpc.json
================================================
{
"action_title": "AWS Get NAT Gateway Info by VPC ID",
"action_description": "This action is used to get the details about nat gateways configured for VPC.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_nat_gateway_by_vpc",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_VPC" ]
}
================================================
FILE: AWS/legos/aws_get_nat_gateway_by_vpc/aws_get_nat_gateway_by_vpc.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for the NAT-gateway-by-VPC lookup."""
    # VPC whose NAT gateways should be listed.
    vpc_id: str = Field(
        title='VPC ID',
        description='VPC ID of the Instance.')
    # Region the VPC lives in (required; no all-regions scan here).
    region: str = Field(
        title='Region',
        description='AWS Region.')
def aws_get_nat_gateway_by_vpc_printer(output):
    """Pretty-print the NAT gateway list; a None output is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_get_nat_gateway_by_vpc(handle, vpc_id: str, region: str) -> List:
    """aws_get_nat_gateway_by_vpc Returns an array of NAT gateways for a VPC.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type vpc_id: string
    :param vpc_id: ID of the Virtual Private Cloud (VPC)

    :type region: string
    :param region: Region to filter instances.

    :rtype: Array of NAT gateways.
    """
    gateways = []
    try:
        ec2_client = handle.client('ec2', region_name=region)
        response = ec2_client.describe_nat_gateways(
            Filter=[{'Name': 'vpc-id','Values': [vpc_id]}])
        for gateway in response.get('NatGateways') or []:
            entry = {}
            for source_key, dest_key in (("NatGatewayId", "nat_id"),
                                         ("SubnetId", "subnet_id"),
                                         ("VpcId", "vpc_id")):
                if source_key in gateway:
                    entry[dest_key] = gateway[source_key]
            # With multiple addresses, the last one wins — matches the
            # original behaviour of overwriting on each iteration.
            for address in gateway["NatGatewayAddresses"]:
                if "PrivateIp" in address:
                    entry["private_ip"] = address["PrivateIp"]
                if "PublicIp" in address:
                    entry["public_ip"] = address["PublicIp"]
            gateways.append(entry)
    except Exception:
        # Best effort: return whatever was collected before the failure.
        pass
    return gateways
================================================
FILE: AWS/legos/aws_get_nlb_targets/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://unskript.com)
================================================
FILE: AWS/legos/aws_get_nlb_targets/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_nlb_targets/aws_get_nlb_targets.json
================================================
{
"action_title": "Get all Targets for Network Load Balancer (NLB)",
"action_description": "Use this action to get all targets for Network Load Balancer (NLB)",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_nlb_targets",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2"]
}
================================================
FILE: AWS/legos/aws_get_nlb_targets/aws_get_nlb_targets.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for the NLB target-health listing."""
    # Region the load balancer lives in.
    region: str = Field(
        title='Region',
        description='AWS Region')
    # Target group ARN queried via describe_target_health below.
    nlb_arn: str = Field(
        title='Network Loadbalancer ARNs',
        description='Network Load Balancer ARNs.')
def aws_get_nlb_targets_printer(output):
    """Pretty-print the NLB target list; a None output is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_get_nlb_targets(handle, region: str, nlb_arn: str) -> List:
    """aws_get_nlb_targets lists Network loadbalancers target details.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region.

    :type nlb_arn: string
    :param nlb_arn: Network Load Balancer ARNs.

    :rtype: List Network load balancers target details.
    """
    targets = []
    try:
        elb_client = handle.client('elbv2', region_name=region)
        health = elb_client.describe_target_health(TargetGroupArn=nlb_arn)
        for description in health['TargetHealthDescriptions']:
            targets.append({
                "target_id": description['Target']['Id'],
                "target_port": description['Target']['Port'],
                "target_health": description['TargetHealth']['State'],
            })
    except Exception as e:
        raise Exception(e) from e
    return targets
================================================
FILE: AWS/legos/aws_get_nlbs_without_targets/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://unskript.com)
================================================
FILE: AWS/legos/aws_get_nlbs_without_targets/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_nlbs_without_targets/aws_get_nlbs_without_targets.json
================================================
{
"action_title": "AWS Get Network Load Balancer (NLB) without Targets",
"action_description": "Use this action to get AWS Network Load Balancer (NLB) without Targets",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_nlbs_without_targets",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_is_check": true,
"action_next_hop": [],
"action_next_hop_parameter_mapping": {},
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS" ,"CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_get_nlbs_without_targets/aws_get_nlbs_without_targets.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    """Input parameters for the NLBs-without-targets check."""
    # Region to scan; an empty string means "scan every available region".
    region: Optional[str] = Field(
        default='',
        title='AWS Region',
        description='AWS Region')
def aws_get_nlbs_without_targets_printer(output):
    """Pretty-print the check result; a None output is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_get_nlbs_without_targets(handle, region: str = "") -> Tuple:
    """aws_get_nlbs_without_targets lists Network loadbalancers ARNs without targets.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region. An empty string scans every available region.

    :rtype: lists Network loadbalancers ARNs without targets.
    """
    if handle is None:
        raise ValueError("Handle must not be None.")
    if region and region not in aws_list_all_regions(handle):
        raise ValueError(f"Invalid region: {region}.")
    regions_to_scan = [region] if region else aws_list_all_regions(handle)
    orphans = []
    for reg in regions_to_scan:
        try:
            elbv2_client = handle.client('elbv2', region_name=reg)
            load_balancers = aws_get_paginator(elbv2_client, "describe_load_balancers", "LoadBalancers")
            for elb in load_balancers:
                if elb['Type'] != "network":
                    continue
                target_groups = elbv2_client.describe_target_groups(
                    LoadBalancerArn=elb['LoadBalancerArn']
                )
                if len(target_groups['TargetGroups']) == 0:
                    orphans.append({
                        "loadBalancer_arn": elb['LoadBalancerArn'],
                        "loadBalancer_name": elb["LoadBalancerName"],
                        "region": reg,
                    })
        except Exception:
            # Best effort: regions that cannot be queried are skipped.
            pass
    if orphans:
        return (False, orphans)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_older_generation_rds_instances/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_older_generation_rds_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_older_generation_rds_instances/aws_get_older_generation_rds_instances.json
================================================
{
"action_title": "AWS Get Older Generation RDS Instances",
"action_description": "AWS Get Older Generation RDS Instances action retrieves information about RDS instances using older generation instance types.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_older_generation_rds_instances",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_next_hop": ["e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"],
"action_next_hop_parameter_mapping": {"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855": {"name": "AWS Update RDS Instances from Old to New Generation", "region": ".[0].region", "rds_instance_ids":"map(.instance)"}},
"action_categories":[ "CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE" ,"CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_RDS"]
}
================================================
FILE: AWS/legos/aws_get_older_generation_rds_instances/aws_get_older_generation_rds_instances.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.connectors.aws import aws_get_paginator
import pprint
class InputSchema(BaseModel):
    """Input parameters for the older-generation-RDS-instances check."""
    # Region to scan; an empty string means "scan every available region".
    region: Optional[str] = Field('', description='AWS Region.', title='AWS Region')
def aws_get_older_generation_rds_instances_printer(output):
    """Pretty-print the check result; a None output is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def is_previous_gen_instance(instance_type):
    """Return True when the RDS instance class belongs to a previous-generation family."""
    # str.startswith accepts a tuple of prefixes, replacing the manual loop.
    return instance_type.startswith(('db.m1', 'db.m2', 'db.t1'))
def aws_get_older_generation_rds_instances(handle, region: str = "") -> Tuple:
    """aws_get_older_generation_rds_instances Gets all older generation RDS DB instances.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: Optional, Region of the RDS. An empty string scans every available region.

    :rtype: Status, List of old RDS Instances
    """
    if not handle or (region and region not in aws_list_all_regions(handle)):
        raise ValueError("Invalid input parameters provided.")
    result = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            # Renamed from 'ec2Client': this is an RDS client, not EC2.
            rdsClient = handle.client('rds', region_name=reg)
            db_instances = aws_get_paginator(rdsClient, "describe_db_instances", "DBInstances")
            for db in db_instances:
                # Reduce e.g. "db.m1.small" to its family prefix "db.m1".
                instance_family = ".".join(db['DBInstanceClass'].split(".", 2)[:2])
                # The original rebound the paginator-result variable ('response')
                # here while still iterating it — bug-prone shadowing, now gone.
                if is_previous_gen_instance(instance_family):
                    db_instance_dict = {}
                    db_instance_dict["region"] = reg
                    db_instance_dict["instance"] = db['DBInstanceIdentifier']
                    result.append(db_instance_dict)
        except Exception:
            # Best effort: regions that cannot be queried are skipped.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_private_address_from_nat_gateways/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_private_address_from_nat_gateways/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_private_address_from_nat_gateways/aws_get_private_address_from_nat_gateways.json
================================================
{
"action_title": "AWS Get Private Address from NAT Gateways",
"action_description": "This action is used to get private address from NAT gateways.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_private_address_from_nat_gateways",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_TROUBLESHOOTING" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_NAT_GATEWAY"],
"action_next_hop": ["c123bb9eff909c27f2d330792689c63110889e0b7754041e2e24ade22ca16615"],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_get_private_address_from_nat_gateways/aws_get_private_address_from_nat_gateways.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    """Input parameters for the NAT-gateway private-address lookup."""
    # Region to scan; an empty string means "scan every available region".
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region.')
def aws_get_private_address_from_nat_gateways_printer(output):
    """Pretty-print the NAT gateway address list; a None output is silently ignored."""
    if output is not None:
        pprint.pprint(output)
def aws_get_private_address_from_nat_gateways(handle, region: str = "") -> Tuple:
    """aws_get_private_address_from_nat_gateways Returns the private addresses of NAT gateways.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: Region to filter NAT Gateways. An empty string scans every available region.

    :rtype: Tuple with private address of NAT gateways.
    """
    findings = []
    regions_to_scan = [region] if region else aws_list_all_regions(handle)
    for reg in regions_to_scan:
        try:
            ec2_client = handle.client('ec2', region_name=reg)
            gateways = aws_get_paginator(ec2_client, "describe_nat_gateways", "NatGateways")
            for gateway in gateways:
                entry = {
                    "nat_id": gateway["NatGatewayId"],
                    "vpc_id": gateway["VpcId"],
                    "region": reg,
                }
                # With several addresses the last private IP wins — matches
                # the original overwrite-per-iteration behaviour.
                for address in gateway["NatGatewayAddresses"]:
                    if "PrivateIp" in address:
                        entry["private_ip"] = address["PrivateIp"]
                findings.append(entry)
        except Exception:
            # Best effort: regions that cannot be queried are skipped.
            pass
    if findings:
        return (False, findings)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_public_ec2_instances/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link [unSkript Sandbox](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_public_ec2_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_public_ec2_instances/aws_get_public_ec2_instances.json
================================================
{
"action_title": "Get AWS EC2 Instances with a public IP",
"action_description": "lists all EC2 instances with a public IP",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_public_ec2_instances",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2" ]
}
================================================
FILE: AWS/legos/aws_get_public_ec2_instances/aws_get_public_ec2_instances.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
## Written by Doug Sillars (and a little help from ChatGPT)
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from beartype import beartype
class InputSchema(BaseModel):
    """Input parameters for the public-EC2-instances listing."""
    # Region to scan (required; no all-regions scan here).
    region: str = Field(
        title='Region',
        description='AWS Region.')
@beartype
def aws_get_public_ec2_instances_printer(output):
    """Pretty-print the instance-id -> public DNS/IP mapping; None is ignored."""
    if output is not None:
        pprint.pprint(output)
@beartype
def aws_get_public_ec2_instances(handle, region: str) -> Dict:
    """aws_get_public_ec2_instances lists all EC2 instances that expose a public DNS name,
    along with their public IP.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region.

    :rtype: Dict mapping instance id -> {"public DNS": ..., "public IP": ...}
    """
    ec2Client = handle.client('ec2', region_name=region)
    res = aws_get_paginator(ec2Client, "describe_instances", "Reservations")
    result={}
    # Iterate through the list of instances
    for reservation in res:
        for instance in reservation['Instances']:
            instance_id = instance['InstanceId']
            # Instances without a public DNS report an empty string; .get also
            # skips records missing the key entirely (previously a KeyError).
            public_DNS = instance.get('PublicDnsName', '')
            if len(public_DNS)>0:
                # PublicIpAddress may be absent; avoid a KeyError and fall
                # back to an empty string.
                public_ip = instance.get('PublicIpAddress', '')
                result[instance_id] = {"public DNS": public_DNS,"public IP":public_ip}
    return result
================================================
FILE: AWS/legos/aws_get_publicly_accessible_db_instances/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_publicly_accessible_db_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_publicly_accessible_db_instances/aws_get_publicly_accessible_db_instances.json
================================================
{
"action_title": "AWS Get Publicly Accessible RDS Instances",
"action_description": "AWS Get Publicly Accessible RDS Instances",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_publicly_accessible_db_instances",
"action_needs_credential": true,
"action_is_check": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS" ],
"action_next_hop": ["dda26fd556dd6b59e2fac9c9ed6e81fc19e5374746049d494237bcdc6a17fae4"],
"action_next_hop_parameter_mapping": {"dda26fd556dd6b59e2fac9c9ed6e81fc19e5374746049d494237bcdc6a17fae4": {"name": "Secure Publicly Accessible Amazon RDS Instances","region":".[0].region","rds_instances":"map(.instance)" }}
}
================================================
FILE: AWS/legos/aws_get_publicly_accessible_db_instances/aws_get_publicly_accessible_db_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.legos.utils import CheckOutput
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    """Input parameters for the publicly-accessible-RDS-instances check."""
    # Region to scan; an empty string means "scan every available region".
    region: Optional[str] = Field(
        '',
        title='Region for RDS',
        description='Region of the RDS.'
    )
def aws_get_publicly_accessible_db_instances_printer(output):
    """Print the check result: CheckOutput is rendered as JSON, anything else pretty-printed."""
    if output is None:
        return
    if isinstance(output, CheckOutput):
        print(output.json())
        return
    pprint.pprint(output)
def aws_get_publicly_accessible_db_instances(handle, region: str = "") -> Tuple:
    """aws_get_publicly_accessible_db_instances Gets all publicly accessible DB instances

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: Region of the RDS.

    :rtype: Tuple of (status, result) where result is a list of publicly
            accessible RDS instances (None when none are found).
    """
    result = []
    # Scan all regions unless the caller pinned one.
    regions_to_scan = aws_list_all_regions(handle) if not region else [region]
    for current_region in regions_to_scan:
        try:
            rds_client = handle.client('rds', region_name=current_region)
            for db in aws_get_paginator(rds_client, "describe_db_instances", "DBInstances"):
                if db['PubliclyAccessible']:
                    result.append({"region": current_region,
                                   "instance": db['DBInstanceIdentifier']})
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_publicly_accessible_db_snapshots/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_publicly_accessible_db_snapshots/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_publicly_accessible_db_snapshots/aws_get_publicly_accessible_db_snapshots.json
================================================
{
"action_title": "AWS Get Publicly Accessible DB Snapshots in RDS",
"action_description": "AWS Get Publicly Accessible DB Snapshots in RDS",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_publicly_accessible_db_snapshots",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_is_check":true,
"action_verbs": ["get"],
"action_nouns": ["aws","database","snapshots","public","accessible"],
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS","CATEGORY_TYPE_SECOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS" ],
"action_next_hop":["7c0d143556a33b81d3fb1ff08dfdd59cebe5d58b00b55e8ae660df2e42f71bfe"],
"action_next_hop_parameter_mapping":{"7c0d143556a33b81d3fb1ff08dfdd59cebe5d58b00b55e8ae660df2e42f71bfe": {"name": "Secure Publicly accessible Amazon RDS Snapshot","region": ".[0].region", "public_snapshot_ids":"map(.open_snapshot)"}}
}
================================================
FILE: AWS/legos/aws_get_publicly_accessible_db_snapshots/aws_get_publicly_accessible_db_snapshots.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.legos.utils import CheckOutput
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.legos.aws.aws_filter_all_manual_database_snapshots.aws_filter_all_manual_database_snapshots import aws_filter_all_manual_database_snapshots
class InputSchema(BaseModel):
    # User-facing inputs of this Action.
    region: Optional[str] = Field('', title='Region', description='Region of the RDS')
def aws_get_publicly_accessible_db_snapshots_printer(output):
    """Render the Action output: CheckOutput as JSON, anything else pretty-printed."""
    if output is None:
        return
    if isinstance(output, CheckOutput):
        print(output.json())
        return
    pprint.pprint(output)
def aws_get_publicly_accessible_db_snapshots(handle, region: str=None) -> Tuple:
    """aws_get_publicly_accessible_db_snapshots lists of publicly accessible
    db_snapshot_identifier.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: Region of the RDS.

    :rtype: Tuple of (status, result) where result lists publicly accessible
            snapshot identifiers in RDS per region.
    """
    regions_to_scan = [region]
    if not region:
        regions_to_scan = aws_list_all_regions(handle=handle)
    # Gather every region's manual snapshots up front.
    manual_snapshots_list = []
    try:
        for reg in regions_to_scan:
            snapshots = aws_filter_all_manual_database_snapshots(handle=handle, region=reg)
            manual_snapshots_list.append({"region": reg, "snapshot": snapshots})
    except Exception as e:
        raise e
    result = []
    for region_snapshots in manual_snapshots_list:
        try:
            rds_client = handle.client('rds', region_name=region_snapshots['region'])
            for snapshot_id in region_snapshots['snapshot']:
                attrs = rds_client.describe_db_snapshot_attributes(
                    DBSnapshotIdentifier=snapshot_id
                )["DBSnapshotAttributesResult"]
                for attribute in attrs['DBSnapshotAttributes']:
                    # An AttributeValues entry of "all" marks the snapshot public.
                    if "all" in attribute["AttributeValues"]:
                        result.append({
                            "region": region_snapshots['region'],
                            "open_snapshot": attrs['DBSnapshotIdentifier'],
                        })
        except Exception:
            # Best-effort: skip snapshots/regions we cannot describe.
            pass
    if result:
        return (False, result)
    return (True, [])
================================================
FILE: AWS/legos/aws_get_rds_automated_snapshots_above_retention_period/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_rds_automated_snapshots_above_retention_period/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_rds_automated_snapshots_above_retention_period/aws_get_rds_automated_snapshots_above_retention_period.json
================================================
{
"action_title": "Get AWS RDS automated db snapshots above retention period",
"action_description": "This Action gets the snapshots above a certain retention period.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_rds_automated_snapshots_above_retention_period",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":["CATEGORY_TYPE_CLOUDOPS","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS"],
"action_next_hop":[],
"action_next_hop_parameter_mapping":{}
}
================================================
FILE: AWS/legos/aws_get_rds_automated_snapshots_above_retention_period/aws_get_rds_automated_snapshots_above_retention_period.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from datetime import datetime, timedelta
import pytz
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.connectors.aws import aws_get_paginator
import pprint
class InputSchema(BaseModel):
    region: Optional[str] = Field(
        '', description='AWS Region of database.', title='Region'
    )
    # Default of 7 days matches the entry function's default; the previous
    # default of '' was not a valid int for an Optional[int] field.
    threshold: Optional[int] = Field(
        7,
        description='The threshold number of days check for retention of automated snapshots. Default is 7 days',
        title='Threshold(in days)',
    )
def aws_get_rds_automated_snapshots_above_retention_period_printer(output):
    """Pretty-print the check result; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_get_rds_automated_snapshots_above_retention_period(handle, region: str="", threshold:int=7) -> Tuple:
    """aws_get_rds_automated_snapshots_above_retention_period finds automated RDS
    snapshots created before the retention threshold.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: Region for database.

    :type threshold: int
    :param threshold: The threshold number of days check for retention of automated snapshots. Default is 7 days.

    :rtype: Tuple of (status, result) where result lists snapshots older than
            the threshold (None when the check passes).
    """
    # Input validation
    if not handle or threshold <= 0:
        raise ValueError("Invalid handle or threshold must be a positive integer.")
    regions_to_scan = [region] if region else aws_list_all_regions(handle)
    # Snapshots created before this instant are above the retention period.
    cutoff = datetime.now(pytz.UTC) - timedelta(days=threshold)
    stale_snapshots = []
    for reg in regions_to_scan:
        try:
            rds_client = handle.client('rds', region_name=reg)
            snapshots = aws_get_paginator(rds_client, "describe_db_snapshots", "DBSnapshots",
                                          SnapshotType='automated')
            for snapshot in snapshots:
                # SnapshotCreateTime can be absent while a snapshot is being created.
                if 'SnapshotCreateTime' not in snapshot:
                    continue
                snapshot_time = snapshot['SnapshotCreateTime'].replace(tzinfo=pytz.UTC)
                if snapshot_time < cutoff:
                    stale_snapshots.append({"db_identifier": snapshot['DBSnapshotIdentifier'],
                                            "region": reg})
        except Exception:
            # Best-effort: skip regions we cannot query.
            pass
    if stale_snapshots:
        return (False, stale_snapshots)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_redshift_query_details/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_redshift_query_details/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_redshift_query_details/aws_get_redshift_query_details.json
================================================
{
"action_title": "AWS Get Redshift Query Details",
"action_description": "Given a QueryId, this Action will give you the status of the Query, along with other data like the number of result rows.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_redshift_query_details",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_REDSHIFT" ]
}
================================================
FILE: AWS/legos/aws_get_redshift_query_details/aws_get_redshift_query_details.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from __future__ import annotations
from typing import Dict
from pydantic import BaseModel, Field
from beartype import beartype
class InputSchema(BaseModel):
    # User-facing inputs of this Action.
    region: str = Field(..., title='Region', description='AWS Region.')
    queryId: str = Field(title='queryId', description='Id of Redshift Query')
@beartype
def aws_get_redshift_query_details(handle, region: str, queryId:str) -> Dict:
    """aws_get_redshift_query_details returns the status and run details of a
    Redshift Data API statement.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region.

    :type queryId: string
    :param queryId: Id of the Redshift query.

    :rtype: Dict with Status, resultReady, queryTimeNs and ResultRows.
    """
    client = handle.client('redshift-data', region_name=region)
    response = client.describe_statement(Id=queryId)
    return {
        "Status": response['Status'],
        "resultReady": response['HasResultSet'],
        "queryTimeNs": response['Duration'],
        "ResultRows": response['ResultRows'],
    }
================================================
FILE: AWS/legos/aws_get_redshift_result/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_redshift_result/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_redshift_result/aws_get_redshift_result.json
================================================
{
"action_title": "AWS Get Redshift Result",
"action_description": "Given a QueryId, Get the Query Result, and format into a List",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_redshift_result",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_REDSHIFT" ]
}
================================================
FILE: AWS/legos/aws_get_redshift_result/aws_get_redshift_result.py
================================================
from __future__ import annotations
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import List
from pydantic import BaseModel, Field
from beartype import beartype
class InputSchema(BaseModel):
    # User-facing inputs of this Action.
    resultId: str = Field(title='resultId', description='Redshift Query Result')
    region: str = Field(..., title='region', description='AWS Region')
@beartype
def aws_get_redshift_result(handle, region:str, resultId: str) -> List:
    """aws_get_redshift_result fetches a Redshift Data API statement result and
    reshapes it into a list of {column-label: value} dictionaries.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region.

    :type resultId: string
    :param resultId: Id of the query whose result to fetch.

    :rtype: List of dicts, one per result row.
    """
    client = handle.client('redshift-data', region_name=region)
    result = client.get_statement_result(Id=resultId)
    # Column labels, in the order values appear inside each record.
    column_labels = [column['label'] for column in result['ColumnMetadata']]
    rows = []
    for record in result['Records']:
        row = {}
        position = 0
        for entry in record:
            # Each entry's values are paired with labels in column order.
            for value in entry.values():
                row[column_labels[position]] = value
                position += 1
        rows.append(row)
    return rows
================================================
FILE: AWS/legos/aws_get_reserved_instances_about_to_retired/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_reserved_instances_about_to_retired/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_reserved_instances_about_to_retired/aws_get_reserved_instances_about_to_retired.json
================================================
{
"action_title": "AWS Get Reserved Instances About To Be Retired",
"action_description": "AWS Get Reserved Instances About To Be Retired",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_reserved_instances_about_to_retired",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_is_check": true,
"action_next_hop": [],
"action_next_hop_parameter_mapping": {},
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2", "CATEGORY_TYPE_COST_OPT"]
}
================================================
FILE: AWS/legos/aws_get_reserved_instances_about_to_retired/aws_get_reserved_instances_about_to_retired.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Tuple, Optional
from datetime import datetime, timezone
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    # User-facing inputs of this Action.
    region: Optional[str] = Field(title='Region', description='AWS Region.')
    threshold: int = Field(7, title='Threshold(In days)',
                           description=('The threshold for the reserved instance is '
                                        'scheduled to end within the threshold.'))
def aws_get_reserved_instances_about_to_retired_printer(output):
    """Pretty-print the soon-to-retire reserved instances; None prints nothing."""
    if output is not None:
        pprint.pprint({"Instances": output})
def aws_get_reserved_instances_about_to_retired(
    handle,
    region: str = "",
    threshold: int = 7
) -> Tuple:
    """aws_get_reserved_instances_about_to_retired Returns an array
    of reserved instances.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: Region to filter instances.

    :type threshold: int
    :param threshold: (in days) The threshold for the reserved
    instance is scheduled to end within the threshold.

    :rtype: Tuple of (status, result) where result lists the reserved
            instances about to retire (None when the check passes).
    """
    now = datetime.now(timezone.utc)
    regions_to_scan = aws_list_all_regions(handle) if not region else [region]
    about_to_retire = []
    for reg in regions_to_scan:
        try:
            ec2_client = handle.client('ec2', region_name=reg)
            reservations = ec2_client.describe_reserved_instances()["ReservedInstances"]
            for reservation in reservations:
                # Only active reservations whose end date falls inside the threshold.
                if reservation['State'] == 'active' and (reservation['End'] - now).days <= threshold:
                    about_to_retire.append({
                        "instance_id": reservation["ReservedInstancesId"],
                        "region": reg,
                    })
        except Exception:
            # Best-effort: skip regions we cannot query.
            pass
    if about_to_retire:
        return (False, about_to_retire)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_resources_missing_tag/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_resources_missing_tag/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_resources_missing_tag/aws_get_resources_missing_tag.json
================================================
{
"action_title": "AWS Get Resources Missing Tag",
"action_description": "Gets a list of all AWS resources that are missing the tag in the input parameters.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_resources_missing_tag",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: AWS/legos/aws_get_resources_missing_tag/aws_get_resources_missing_tag.py
================================================
from __future__ import annotations
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List, Dict
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
import pprint
class InputSchema(BaseModel):
    # User-facing inputs of this Action.
    region: str = Field(..., title='Region', description='AWS Region.')
    tag: str = Field(..., title='tag', description='The Tag to search for')
def aws_get_resources_missing_tag_printer(output):
    """Summarize how many resources are missing the tag; None prints nothing."""
    if output is not None:
        pprint.pprint(f"there are {len(output)} resources missing the tag. We can fix a max of 20." )
def aws_get_resources_missing_tag(handle, region: str, tag:str) -> List:
    """aws_get_resources_missing_tag Returns a List of resource ARNs that do
    not carry the given tag key.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: str
    :param region: Region to filter resources.

    :type tag: str
    :param tag: The tag key each resource is required to have.

    :rtype: List of ARNs of resources missing the tag (or a single
            {"error": ...} entry if the listing failed).
    """
    # NOTE(review): these look like environment-specific exclusions
    # (e.g. "sagarProxy", a hard-coded fsap id) — confirm they are still
    # wanted before reusing this Action elsewhere.
    arnKeywordsToIgnore = ["sqlworkbench",
                           "AutoScalingManagedRule",
                           "sagarProxy",
                           "fsap-0f4d1bbd83f172783",
                           "experiment"]

    def _is_ignored(arn: str) -> bool:
        # A resource is excluded when any ignore keyword appears in its ARN.
        return any(substring in arn for substring in arnKeywordsToIgnore)

    tagClient = handle.client('resourcegroupstaggingapi', region_name=region)
    result = []
    try:
        response = aws_get_paginator(tagClient, "get_resources", "ResourceTagMappingList")
        for resources in response:
            arn = resources["ResourceARN"]
            if _is_ignored(arn):
                continue
            # Keys present on this resource; empty when it has no tags at all,
            # which also counts as "missing the tag".
            keyList = [kv["Key"] for kv in resources["Tags"]]
            if tag not in keyList:
                result.append(arn)
    except Exception as error:
        result.append({"error": error})
    return result
================================================
FILE: AWS/legos/aws_get_resources_with_expiration_tag/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_resources_with_expiration_tag/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_resources_with_expiration_tag/aws_get_resources_with_expiration_tag.json
================================================
{
"action_title": "AWS Get Resources With Expiration Tag",
"action_description": "AWS Get all Resources with an expiration tag",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_resources_with_expiration_tag",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: AWS/legos/aws_get_resources_with_expiration_tag/aws_get_resources_with_expiration_tag.py
================================================
from __future__ import annotations
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List, Dict
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
import pprint
class InputSchema(BaseModel):
    # User-facing inputs of this Action.
    region: str = Field(..., title='Region', description='AWS Region.')
    tag: str = Field(..., title='tag', description='The Tag to search for')
def aws_get_resources_with_expiration_tag_printer(output):
    """Summarize how many resources carry the expiration tag; None prints nothing."""
    if output is not None:
        pprint.pprint(f"there are {len(output)} resources with expiration tag." )
def aws_get_resources_with_expiration_tag(handle, region: str, tag:str) -> List:
    """aws_get_resources_with_expiration_tag Returns a List of resources that
    carry the given expiration tag, together with the tag's value.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: str
    :param region: Region to filter resources.

    :type tag: str
    :param tag: The expiration tag key to search for.

    :rtype: List of {'arn': [...], 'expires': value} dicts (or a single
            {"error": ...} entry if the listing failed).
    """
    tagClient = handle.client('resourcegroupstaggingapi', region_name=region)
    result = []
    try:
        response = aws_get_paginator(tagClient, "get_resources", "ResourceTagMappingList")
        for resources in response:
            # Untagged resources cannot match; .get keeps the loop a no-op.
            for kv in resources.get("Tags", []):
                if kv["Key"] == tag:
                    # Found the expiration tag on this resource.
                    # (Removed leftover debug print of each match.)
                    result.append({'arn': [resources["ResourceARN"]], 'expires': kv["Value"]})
    except Exception as error:
        result.append({"error": error})
    return result
================================================
FILE: AWS/legos/aws_get_resources_with_tag/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_resources_with_tag/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_resources_with_tag/aws_get_resources_with_tag.json
================================================
{
"action_title": "AWS Get Resources With Tag",
"action_description": "For a given tag and region, get every AWS resource with that tag.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_resources_with_tag",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: AWS/legos/aws_get_resources_with_tag/aws_get_resources_with_tag.py
================================================
from __future__ import annotations
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List, Dict
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
import pprint
class InputSchema(BaseModel):
    # User-facing inputs of this Action.
    region: str = Field(..., title='Region', description='AWS Region.')
    tag: str = Field(..., title='tag', description='The Tag to search for')
def aws_get_resources_with_tag_printer(output):
    """Summarize how many resources carry the desired tag; None prints nothing."""
    if output is not None:
        pprint.pprint(f"there are {len(output)} resources with the desired tag." )
def aws_get_resources_with_tag(handle, region: str, tag:str) -> List:
    """aws_get_resources_with_tag Returns a List of resources that carry the
    given tag key, together with the tag's value.

    (The previous docstring said "Untagged Resources" — copied from the
    missing-tag Action — which is the opposite of what this function does.)

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: str
    :param region: Region to filter resources.

    :type tag: str
    :param tag: The tag key to search for.

    :rtype: List of {"arn": ..., "value": ...} dicts (or a single
            {"error": ...} entry if the listing failed).
    """
    tagClient = handle.client('resourcegroupstaggingapi', region_name=region)
    result = []
    try:
        response = aws_get_paginator(tagClient, "get_resources", "ResourceTagMappingList")
        for resources in response:
            # Untagged resources cannot match; .get keeps the loop a no-op.
            for kv in resources.get("Tags", []):
                if kv["Key"] == tag:
                    result.append({"arn": resources["ResourceARN"], "value": kv["Value"]})
    except Exception as error:
        result.append({"error": error})
    return result
================================================
FILE: AWS/legos/aws_get_s3_buckets/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_s3_buckets/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_s3_buckets/aws_get_s3_buckets.json
================================================
{
"action_title": "Get AWS S3 Buckets",
"action_description": "Get AWS S3 Buckets",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_s3_buckets",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS" ,"CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3" ]
}
================================================
FILE: AWS/legos/aws_get_s3_buckets/aws_get_s3_buckets.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # User-facing inputs of this Action.
    region: str = Field(title='Region', description='AWS Region.')
def aws_get_s3_buckets_printer(output):
    """Pretty-print the bucket list; None prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_get_s3_buckets(handle, region: str) -> List:
    """aws_get_s3_buckets List all the S3 buckets.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: location of the bucket

    :rtype: List of all the S3 bucket names (empty if listing failed).
    """
    # Initialise before the try block: previously `result` was only assigned
    # inside the try, so a failure in buckets.all() caused a NameError at
    # `return result` instead of returning gracefully.
    result = []
    s3Session = handle.resource("s3", region_name=region)
    try:
        for bucket in s3Session.buckets.all():
            result.append(bucket.name)
    except Exception:
        # Best-effort: an inaccessible account simply yields an empty list.
        pass
    return result
================================================
FILE: AWS/legos/aws_get_schedule_to_retire_instances/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_secret_from_secretmanager/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_secret_from_secretmanager/aws_get_secret_from_secretmanager.json
================================================
{
"action_title": "Get secrets from secretsmanager",
"action_description": "Get secrets from AWS secretsmanager",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_secret_from_secretmanager",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_SECRET_MANAGER"]
}
================================================
FILE: AWS/legos/aws_get_secret_from_secretmanager/aws_get_secret_from_secretmanager.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
## @author: Yugal Pachpande, @email: yugal.pachpande@unskript.com
##
import pprint
from pydantic import BaseModel, Field
from botocore.exceptions import ClientError
class InputSchema(BaseModel):
    # Name of the secret to read from Secrets Manager.
    SecretId: str = Field(title='Secret Name', description='Name of the secret.')
    region: str = Field(title='Region', description='AWS Region.')
def aws_get_secret_from_secretmanager_printer(output):
    """Pretty-print the fetched secret value; None prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_get_secret_from_secretmanager(handle, SecretId: str, region: str) -> str:
    """aws_get_secret_from_secretmanager returns The decrypted secret value

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type SecretId: string
    :param SecretId: Name of the secret.

    :type region: string
    :param region: AWS Region.

    :rtype: The decrypted secret value (None when the lookup failed).
    """
    secretsmanager_client = handle.client(service_name='secretsmanager', region_name=region)
    try:
        response = secretsmanager_client.get_secret_value(SecretId=SecretId)
    except ClientError as e:
        code = e.response['Error']['Code']
        if code == 'ResourceNotFoundException':
            print("The requested secret " + SecretId + " was not found")
        elif code == 'InvalidRequestException':
            print("The request was invalid due to:", e)
        elif code == 'InvalidParameterException':
            print("The request had invalid params:", e)
        elif code == 'DecryptionFailure':
            print("The requested secret can't be decrypted using the provided KMS key:", e)
        elif code == 'InternalServiceError':
            print("An error occurred on service side:", e)
        # Error paths implicitly return None, as before.
    else:
        # Secrets Manager decrypts the secret value using the associated KMS CMK
        # Depending on whether the secret was a string or binary, only one of
        # these fields will be populated
        if 'SecretString' in response:
            text_secret_data = response['SecretString']
            pprint.pprint(text_secret_data)
            return text_secret_data
        binary_secret_data = response['SecretBinary']
        pprint.pprint(binary_secret_data)
        return binary_secret_data
================================================
FILE: AWS/legos/aws_get_secrets_manager_secret/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_secrets_manager_secret/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_secrets_manager_secret/aws_get_secrets_manager_secret.json
================================================
{
"action_title": "AWS Get Secrets Manager Secret",
"action_description": "Get string (of JSON) containing Secret details",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_secrets_manager_secret",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_SECRET_MANAGER"]
}
================================================
FILE: AWS/legos/aws_get_secrets_manager_secret/aws_get_secrets_manager_secret.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from __future__ import annotations
import pprint
from pydantic import BaseModel, Field
from beartype import beartype
from botocore.exceptions import ClientError
class InputSchema(BaseModel):
    # User-facing inputs of this Action.
    region: str = Field(..., title='Region', description='AWS Region.')
    secret_name: str = Field(title='secret_name', description='AWS Secret Name')
@beartype
def aws_get_secrets_manager_secret_printer(output):
    """Pretty-print the fetched secret; None prints nothing."""
    if output is not None:
        pprint.pprint({"secret": output})
@beartype
def aws_get_secrets_manager_secret(handle, region: str, secret_name:str) -> str:
    """aws_get_secrets_manager_secret fetches the SecretString of a secret.

    (The @beartype decorator was previously applied twice; once is enough —
    the duplicate only wrapped the function a second time.)

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region.

    :type secret_name: string
    :param secret_name: AWS Secret Name.

    :rtype: The decrypted secret string.
    """
    # Create a Secrets Manager client
    client = handle.client(
        service_name='secretsmanager',
        region_name=region
    )
    try:
        get_secret_value_response = client.get_secret_value(
            SecretId=secret_name
        )
    except ClientError as e:
        # For a list of exceptions thrown, see
        # https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
        raise e
    # Decrypts secret using the associated KMS key.
    return get_secret_value_response['SecretString']
================================================
FILE: AWS/legos/aws_get_secrets_manager_secretARN/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_secrets_manager_secretARN/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_secrets_manager_secretARN/aws_get_secrets_manager_secretARN.json
================================================
{
"action_title": "AWS Get Secrets Manager SecretARN",
"action_description": "Given a Secret Name - this Action returns the Secret ARN",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_secrets_manager_secretARN",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_SECRET_MANAGER"]
}
================================================
FILE: AWS/legos/aws_get_secrets_manager_secretARN/aws_get_secrets_manager_secretARN.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from __future__ import annotations
import pprint
from pydantic import BaseModel, Field
from beartype import beartype
from botocore.exceptions import ClientError
class InputSchema(BaseModel):
    # Region that hosts the secret.
    region: str = Field(title='Region', description='AWS Region.')
    # Name of the secret whose ARN should be looked up.
    secret_name: str = Field(title='secret_name', description='AWS Secret Name')
@beartype
def aws_get_secrets_manager_secretARN_printer(output):
    """Pretty-print the secret ARN; a None result prints nothing."""
    if output is not None:
        pprint.pprint({"secret": output})
@beartype
def aws_get_secrets_manager_secretARN(handle, region: str, secret_name: str) -> str:
    """aws_get_secrets_manager_secretARN looks up a secret by name and returns its ARN.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: AWS Region hosting the secret.
    :type secret_name: string
    :param secret_name: Name of the Secrets Manager secret.
    :rtype: The ARN of the secret.
    """
    secrets_client = handle.client(
        service_name='secretsmanager',
        region_name=region
    )
    try:
        response = secrets_client.get_secret_value(SecretId=secret_name)
    except ClientError as err:
        # Possible exceptions are listed at
        # https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
        raise err
    return response['ARN']
================================================
FILE: AWS/legos/aws_get_security_group_details/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_security_group_details/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_security_group_details/aws_get_security_group_details.json
================================================
{
"action_title": "Get AWS Security Group Details",
"action_description": "Get details about a security group, given its ID.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_security_group_details",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2"]
}
================================================
FILE: AWS/legos/aws_get_security_group_details/aws_get_security_group_details.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Security group to describe, e.g. "sg-12345".
    group_id: str = Field(
        title='Security Group ID',
        description='AWS Security Group ID. For eg: sg-12345',
    )
    # Region in which the security group lives.
    region: str = Field(title='Region', description='AWS Region')
def aws_get_security_group_details_printer(output):
    """Pretty-print the security group details; a None result prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_get_security_group_details(handle, group_id: str, region: str) -> Dict:
    """aws_get_security_group_details returns details about the given security group.

    (The previous docstring said "returns The decrypted secret value" -- a
    copy-paste error from the Secrets Manager lego; this action describes an
    EC2 security group.)

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type group_id: string
    :param group_id: AWS Security Group ID. For eg: sg-12345
    :type region: string
    :param region: AWS Region.
    :rtype: Dict response of describe_security_groups for the given group ID.
    """
    ec2Client = handle.client('ec2', region_name=region)
    res = ec2Client.describe_security_groups(GroupIds=[group_id])
    return res
================================================
FILE: AWS/legos/aws_get_service_quota_details/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_service_quota_details/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_service_quota_details/aws_get_service_quota_details.json
================================================
{
"action_title": "AWS Get Service Quota for a Specific ServiceName",
"action_description": "Given an AWS Region, Service Code and Quota Code, this Action will output the quota information for the specified service.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_service_quota_details",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_get_service_quota_details/aws_get_service_quota_details.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from __future__ import annotations
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from beartype import beartype
class InputSchema(BaseModel):
    # Quota code identifying the specific limit to query.
    quota_code: str = Field(
        title='quota_code',
        description='The quota code for the Service Type',
    )
    # Region to query.
    region: str = Field(..., title='Region', description='AWS Region.')
    # Service code identifying the AWS service.
    service_code: str = Field(
        title='service_code',
        description='The service code to be queried',
    )
@beartype
def aws_get_service_quota_details_printer(output):
    """Pretty-print the service quota details; a None result prints nothing."""
    if output is not None:
        pprint.pprint(output)
#list_service_quotas
#list_aws_default_service_quotas
@beartype
def aws_get_service_quota_details(handle, service_code: str, quota_code: str, region: str) -> Dict:
    """aws_get_service_quota_details fetches the quota entry for one service quota.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type service_code: string
    :param service_code: The service code to be queried.
    :type quota_code: string
    :param quota_code: The quota code for the Service Type.
    :type region: string
    :param region: AWS Region.
    :rtype: Dict response of get_service_quota.
    """
    quota_client = handle.client('service-quotas', region_name=region)
    return quota_client.get_service_quota(
        ServiceCode=service_code,
        QuotaCode=quota_code,
    )
================================================
FILE: AWS/legos/aws_get_service_quotas/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_service_quotas/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_service_quotas/aws_get_service_quotas.json
================================================
{
"action_title": "AWS Get Quotas for a Service",
"action_description": "Given inputs of the AWS Region, and the Service_Code for a service, this Action will output all of the Service Quotas and limits.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_service_quotas",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_get_service_quotas/aws_get_service_quotas.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from __future__ import annotations
import pprint
from typing import List
from pydantic import BaseModel, Field
from beartype import beartype
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    # Region to query.
    region: str = Field(..., title='region', description='AWS region')
    # Service whose quotas are listed; defaults to "ec2".
    service_code: str = Field(
        'ec2',
        title='service_code',
        description='The service code is used to get all quotas for the service',
    )
@beartype
def aws_get_service_quotas_printer(output):
    """Pretty-print the list of service quotas; a None result prints nothing."""
    if output is not None:
        pprint.pprint(output)
#list_service_quotas
#list_aws_default_service_quotas
@beartype
def aws_get_service_quotas(handle, service_code: str, region: str) -> List:
    """aws_get_service_quotas lists every quota for the given service.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type service_code: string
    :param service_code: Service code whose quotas are listed.
    :type region: string
    :param region: AWS region.
    :rtype: List of quota entries from list_service_quotas.
    """
    quota_client = handle.client('service-quotas', region_name=region)
    # Page through up to 1000 quota entries, 100 per page.
    return aws_get_paginator(
        quota_client,
        'list_service_quotas',
        'Quotas',
        ServiceCode=service_code,
        PaginationConfig={'MaxItems': 1000, 'PageSize': 100},
    )
================================================
FILE: AWS/legos/aws_get_stopped_instance_volumes/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_stopped_instance_volumes/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_stopped_instance_volumes/aws_get_stopped_instance_volumes.json
================================================
{
"action_title": "Get Stopped Instance Volumes",
"action_description": "This action helps to list the volumes that are attached to stopped instances.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_stopped_instance_volumes",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check":true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_next_hop": ["a9d17f4c9feb963b6096290eedb21af43d89e803cdcb1238dc11a544a3071a1e"],
"action_next_hop_parameter_mapping": {"a9d17f4c9feb963b6096290eedb21af43d89e803cdcb1238dc11a544a3071a1e": {"name": "Delete EBS Volume Attached to Stopped Instances", "region":".[0].region","volume_ids":"map(.volume_id)"}},
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2","CATEGORY_TYPE_AWS_EBS" ]
}
================================================
FILE: AWS/legos/aws_get_stopped_instance_volumes/aws_get_stopped_instance_volumes.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    # Optional region filter; an empty string means scan every region.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region.',
    )
def aws_get_stopped_instance_volumes_printer(output):
    """Pretty-print the check result tuple; a None result prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_get_stopped_instance_volumes(handle, region: str = "") -> Tuple:
    """aws_get_stopped_instance_volumes Returns an array of volumes.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: Region to filter instances.
    :rtype: Tuple of (status, list of EBS volumes attached to stopped instances, or None).
    """
    matches = []
    regions = [region] if region else aws_list_all_regions(handle)
    for reg in regions:
        try:
            ec2_client = handle.client('ec2', region_name=reg)
            reservations = aws_get_paginator(ec2_client, "describe_instances", "Reservations")
            for reservation in reservations:
                for instance in reservation['Instances']:
                    if instance['State']['Name'] != 'stopped':
                        continue
                    # Collect every EBS volume attached to the stopped instance.
                    for mapping in instance['BlockDeviceMappings']:
                        if 'Ebs' in mapping:
                            matches.append({
                                "volume_id": mapping['Ebs']['VolumeId'],
                                "region": reg,
                            })
        except Exception:
            # Best effort: skip regions that are disabled or inaccessible.
            pass
    if matches:
        return (False, matches)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_sts_caller_identity/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_sts_caller_identity/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_sts_caller_identity/aws_get_sts_caller_identity.json
================================================
{
"action_title": "Get STS Caller Identity",
"action_description": "Get STS Caller Identity",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_sts_caller_identity",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_STS"]
}
================================================
FILE: AWS/legos/aws_get_sts_caller_identity/aws_get_sts_caller_identity.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel
class InputSchema(BaseModel):
    # This action takes no input parameters.
    pass
def aws_get_sts_caller_identity_printer(output):
    """Pretty-print the STS caller identity; a None result prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_get_sts_caller_identity(handle) -> Dict:
    """aws_get_sts_caller_identity Returns a dict of STS caller identity info.

    (Previous docstring referred to "aws_get_caller_identity"; fixed to match
    the function name. The local variable was also renamed from 'ec2Client' --
    the client created here is STS, not EC2.)

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :rtype: Dict of STS caller identity info (UserId, Account, Arn).
    """
    stsClient = handle.client('sts')
    response = stsClient.get_caller_identity()
    return response
================================================
FILE: AWS/legos/aws_get_tags_of_all_resources/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_tags_of_all_resources/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_tags_of_all_resources/aws_get_tags_of_all_resources.json
================================================
{
"action_title": "AWS Get Tags of All Resources",
"action_description": "AWS Get Tags of All Resources",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_tags_of_all_resources",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_get_tags_of_all_resources/aws_get_tags_of_all_resources.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    # Region whose resource tag keys are listed.
    region: str = Field(title='Region', description='AWS Region.')
def aws_get_tags_of_all_resources_printer(output):
    """Pretty-print the tag-key list; a None result prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_get_tags_of_all_resources(handle, region: str) -> List:
    """aws_get_tags_of_all_resources Returns a list of all resource tag keys.

    NOTE: this calls the Resource Groups Tagging API 'get_tag_keys' operation,
    so the result contains the tag KEYS in use across resources in the region,
    not full key/value pairs.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: str
    :param region: Region to filter resources.
    :rtype: List of tag keys, or a single-element list containing an
        {"error": ...} dict if the API call fails.
    """
    # The client is the Resource Groups Tagging API, not EC2 -- the previous
    # local name 'ec2Client' was misleading.
    taggingClient = handle.client('resourcegroupstaggingapi', region_name=region)
    result = []
    try:
        result = aws_get_paginator(taggingClient, "get_tag_keys", "TagKeys")
    except Exception as error:
        # Preserve the original best-effort contract: surface the error in the
        # returned list rather than raising.
        result.append({"error": error})
    return result
================================================
FILE: AWS/legos/aws_get_timed_out_lambdas/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_timed_out_lambdas/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_timed_out_lambdas/aws_get_timed_out_lambdas.json
================================================
{
"action_title": "Get Timed Out AWS Lambdas",
"action_description": "Get AWS Lambda functions that have exceeded the maximum amount of time in seconds that a Lambda function can run.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_timed_out_lambdas",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELB"],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_get_timed_out_lambdas/aws_get_timed_out_lambdas.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Tuple, Optional
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
import pprint
import datetime
class InputSchema(BaseModel):
    # Look-back window in days; defaults to 1.
    days_back: Optional[int] = Field(
        1,
        title='Days Back',
        description='(in days) Number of days to go back. Default value is 1 day.',
    )
    # Optional region; an empty string means scan every region.
    region: Optional[str] = Field(
        '',
        title='Region',
        description='AWS region. Eg: "us-west-2"',
    )
def aws_get_timed_out_lambdas_printer(output):
    """Pretty-print the check result tuple; a None result prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_get_timed_out_lambdas(handle, days_back:int=1, region:str="") -> Tuple:
    """aws_get_timed_out_lambdas finds Lambda functions whose recent p90 duration
    reached or exceeded their configured timeout.

    (Previous docstring said "high error rate" -- a copy-paste error; this
    check compares the CloudWatch 'Duration' metric against the function's
    'Timeout' setting.)

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type days_back: int
    :param days_back: (in days) Number of days to go back. Default value is 1 day.
    :type region: string
    :param region: AWS region. Eg: "us-west-2"
    :rtype: Tuple with status result and list of Lambda functions that have timed out
    """
    result = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    for reg in all_regions:
        try:
            lambdaClient = handle.client('lambda', region_name=reg)
            cloudwatchClient = handle.client('cloudwatch', region_name=reg)
            # Get a list of all the Lambda functions in your account
            # NOTE(review): list_functions is not paginated here, so only the
            # first page of functions is examined -- confirm if intended.
            response = lambdaClient.list_functions()
            number_of_days = int(days_back)
            start_time = datetime.datetime.now() - datetime.timedelta(days=number_of_days)
            # Iterate through the list of functions and filter out the ones that have timed out
            for function in response['Functions']:
                # Get the configuration for the function
                config_response = lambdaClient.get_function_configuration(FunctionName=function['FunctionName'])
                # Check if the function has a timeout set and if it has timed out
                if 'Timeout' in config_response and config_response['Timeout'] > 0:
                    # p90 of the 'Duration' metric over 5-minute periods
                    # since start_time.
                    metrics_response = cloudwatchClient.get_metric_data(
                        MetricDataQueries=[
                            {
                                'Id': 'm1',
                                'MetricStat': {
                                    'Metric': {
                                        'Namespace': 'AWS/Lambda',
                                        'MetricName': 'Duration',
                                        'Dimensions': [
                                            {
                                                'Name': 'FunctionName',
                                                'Value': function['FunctionName']
                                            },
                                        ]
                                    },
                                    'Period': 300,
                                    'Stat': 'p90'
                                },
                                'ReturnData': True
                            },
                        ],
                        StartTime=start_time,
                        EndTime=datetime.datetime.now()
                    )
                    # Check if the function has timed out. Timeout is in seconds
                    # while Duration is reported in milliseconds, hence * 1000.
                    # NOTE(review): only the first datapoint is compared --
                    # confirm whether every datapoint should be checked.
                    if len(metrics_response['MetricDataResults'][0]['Values'])!=0:
                        if metrics_response['MetricDataResults'][0]['Values'][0] >= config_response['Timeout'] * 1000:
                            lambda_func = {}
                            lambda_func['function_name'] = function['FunctionName']
                            lambda_func['region'] = reg
                            result.append(lambda_func)
                    else:
                        continue
        except Exception:
            # Best effort: skip regions that are disabled or inaccessible.
            pass
    if len(result) != 0:
        return (False, result)
    else:
        return (True, None)
================================================
FILE: AWS/legos/aws_get_ttl_for_route53_records/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_ttl_for_route53_records/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_ttl_for_route53_records/aws_get_ttl_for_route53_records.json
================================================
{
"action_title": "AWS Get TTL For Route53 Records",
"action_description": "Get TTL for Route53 records for a hosted zone.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_ttl_for_route53_records",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ROUTE53"]
}
================================================
FILE: AWS/legos/aws_get_ttl_for_route53_records/aws_get_ttl_for_route53_records.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    # Hosted zone whose record TTLs are listed.
    hosted_zone_id: str = Field(
        ...,
        title='Hosted Zone ID',
        description='ID of the Hosted zone used for routing traffic.',
    )
def aws_get_ttl_for_route53_records_printer(output):
    """Pretty-print the record TTL list; a None result prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_get_ttl_for_route53_records(handle, hosted_zone_id: str) -> List:
    """aws_get_ttl_for_route53_records Returns TTL for records in a hosted zone

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.
    :type hosted_zone_id: str
    :param hosted_zone_id: ID of the Hosted zone used for routing traffic.
    :rtype: List of details with the record type, record name and record TTL.
    """
    route53_client = handle.client('route53')
    record_sets = aws_get_paginator(
        route53_client,
        "list_resource_record_sets",
        "ResourceRecordSets",
        HostedZoneId=hosted_zone_id,
    )
    # Records lacking a TTL key are reported with the string 'N/A'.
    return [
        {
            "record_name": record.get('Name'),
            "record_type": record.get('Type'),
            "record_ttl": record.get('TTL', 'N/A'),
        }
        for record in record_sets
    ]
================================================
FILE: AWS/legos/aws_get_ttl_under_given_hours/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_ttl_under_given_hours/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_ttl_under_given_hours/aws_get_ttl_under_given_hours.json
================================================
{
"action_title": "AWS: Check for short Route 53 TTL",
"action_description": "AWS: Check for short Route 53 TTL",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_ttl_under_given_hours",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_supports_poll": true,
"action_is_check": true,
"action_next_hop": ["a0773e52a3a3a8688e47a9e10eba1c680913d28a9a8c4466113181534bd1f972"],
"action_next_hop_parameter_mapping": {"a0773e52a3a3a8688e47a9e10eba1c680913d28a9a8c4466113181534bd1f972": {"name": "Change AWS Route53 TTL", "hosted_zone_id": "map(.hosted_zone_id)", "record_name": "map(.record_name)", "record_type": "map(.record_type)"}},
"action_categories": ["CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ROUTE53"]
}
================================================
FILE: AWS/legos/aws_get_ttl_under_given_hours/aws_get_ttl_under_given_hours.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_get_ttl_for_route53_records.aws_get_ttl_for_route53_records import aws_get_ttl_for_route53_records
class InputSchema(BaseModel):
    # Threshold in hours; records with a TTL below this are flagged.
    threshold: Optional[int] = Field(
        default=1,
        title='Threshold (In hours)',
        description=('(In hours) A threshold in hours to verify route '
                     '53 TTL is within the threshold.'),
    )
def aws_get_ttl_under_given_hours_printer(output):
    """Pretty-print the check result tuple; a None result prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_get_ttl_under_given_hours(handle, threshold: int = 1) -> Tuple:
    """aws_get_ttl_under_given_hours flags Route 53 records whose TTL is below the threshold.

    (Previous docstring named a different function and declared 'threshold' as
    str; both fixed. The no-op 'except Exception as e: raise e' wrapper has
    been removed -- it only re-raised the exception unchanged.)

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.
    :type threshold: int
    :param threshold: (In hours) A threshold in hours to verify route
        53 TTL is within the threshold.
    :rtype: Tuple of (status, list of records whose TTL is under the threshold, or None).
    """
    if handle is None:
        raise ValueError("Handle must not be None.")
    result = []
    route_client = handle.client('route53')
    seconds = threshold * 3600
    hosted_zones = aws_get_paginator(route_client, "list_hosted_zones", "HostedZones")
    for zone in hosted_zones:
        zone_id = zone.get('Id')
        if not zone_id:
            continue
        record_ttl_data = aws_get_ttl_for_route53_records(handle, zone_id)
        for record_ttl in record_ttl_data:
            # Skip records without a numeric TTL (reported as the string 'N/A').
            if 'record_ttl' not in record_ttl or isinstance(record_ttl['record_ttl'], str):
                continue
            if record_ttl['record_ttl'] < seconds:
                result.append({
                    "hosted_zone_id": zone_id,
                    "record_name": record_ttl.get('record_name', ''),
                    "record_type": record_ttl.get('record_type', ''),
                    "record_ttl": record_ttl['record_ttl'],
                })
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_unhealthy_instances/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_unhealthy_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_unhealthy_instances/aws_get_unhealthy_instances.json
================================================
{
"action_title": "Get UnHealthy EC2 Instances for Classic ELB",
"action_description": "Get UnHealthy EC2 Instances for Classic ELB",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_unhealthy_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ELB"]
}
================================================
FILE: AWS/legos/aws_get_unhealthy_instances/aws_get_unhealthy_instances.py
================================================
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Classic ELB to inspect.
    elb_name: str = Field(
        title='ELB Name',
        description='Name of the ELB. NOTE: It ONLY supports Classic.',
    )
    # Region where the ELB lives.
    region: str = Field(title='Region', description='Name of the AWS Region')
def aws_get_unhealthy_instances_printer(output):
    """Print unhealthy instances; an empty list means everything is healthy."""
    if output is None:
        return
    if output == []:
        print("All instances are healthy")
        return
    pprint.pprint(output)
def aws_get_unhealthy_instances(handle, elb_name: str, region: str) -> List:
    """aws_get_unhealthy_instances returns array of unhealthy instances

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type elb_name: string
    :param elb_name: Name of the ELB. Note: It ONLY supports Classic.
    :type region: string
    :param region: Name of the AWS Region.
    :rtype: Returns array of unhealthy instances
    """
    elb_client = handle.client('elb', region_name=region)
    health = elb_client.describe_instance_health(
        LoadBalancerName=elb_name,
    )
    # Instances reported "OutOfService" are treated as unhealthy.
    return [
        state for state in health['InstanceStates']
        if state['State'] == "OutOfService"
    ]
================================================
FILE: AWS/legos/aws_get_unhealthy_instances_from_elb/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_unhealthy_instances_from_elb/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_unhealthy_instances_from_elb/aws_get_unhealthy_instances_from_elb.json
================================================
{
"action_title": "Get Unhealthy instances from ELB",
"action_description": "Get Unhealthy instances from Elastic Load Balancer",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_unhealthy_instances_from_elb",
"action_needs_credential": true,
"action_is_check": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" , "CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELB"],
"action_next_hop": ["94707558cebedbcb77aabaec5d6d2d1bf3f4664db6e9e905d6d905a11a3ef8bc"],
"action_next_hop_parameter_mapping": {"94707558cebedbcb77aabaec5d6d2d1bf3f4664db6e9e905d6d905a11a3ef8bc": {"name": "AWS Get unhealthy EC2 instances from ELB", "region": ".[0].region", "elb_name":"map(.load_balancer_name)"}}
}
================================================
FILE: AWS/legos/aws_get_unhealthy_instances_from_elb/aws_get_unhealthy_instances_from_elb.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    # Optional ELB name; an empty string means inspect every load balancer.
    elb_name: Optional[str] = Field(
        default="",
        title='ELB Name',
        description='Name of the elastic load balancer.',
    )
    # Optional region; an empty string means scan every region.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region of the ELB.',
    )
def aws_get_unhealthy_instances_from_elb_printer(output):
    """Pretty-print the action output; do nothing for empty results."""
    if output is not None:
        pprint.pprint(output)
def aws_get_unhealthy_instances_from_elb(handle, elb_name: str = "", region: str = "") -> Tuple:
    """aws_get_unhealthy_instances_from_elb gives unhealthy instances from ELB

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type elb_name: string
    :param elb_name: Name of the elastic load balancer. When empty, every
        load balancer in scope is inspected.

    :type region: string
    :param region: AWS region. When empty, all regions are scanned.

    :rtype: Tuple of (status, data): status is True when no unhealthy
        instance was found, otherwise False with the list of unhealthy
        instances from ELB.
    """
    result = []
    all_regions = [region] if region else aws_list_all_regions(handle)
    elb_list = []
    # The three cases below are mutually exclusive, so chain them with elif
    # instead of three independent ifs (and drop the leftover debug print).
    if not elb_name:
        # No name given: collect every classic ELB in the selected region(s).
        for reg in all_regions:
            try:
                elb_client = handle.client('elb', region_name=reg)
                response = aws_get_paginator(elb_client, "describe_load_balancers",
                                             "LoadBalancerDescriptions")
                for lb in response:
                    elb_list.append({"load_balancer_name": lb["LoadBalancerName"],
                                     "region": reg})
            except Exception:
                # Best-effort: skip regions that are disabled or inaccessible.
                pass
    elif not region:
        # Name given but no region: search every region for matching ELBs.
        for reg in all_regions:
            try:
                elb_client = handle.client('elb', region_name=reg)
                response = aws_get_paginator(elb_client, "describe_load_balancers",
                                             "LoadBalancerDescriptions")
                for lb in response:
                    if elb_name in lb["LoadBalancerName"]:
                        elb_list.append({"load_balancer_name": lb["LoadBalancerName"],
                                         "region": reg})
            except Exception:
                pass
    else:
        # Both name and region given: query that load balancer directly.
        elb_client = handle.client('elb', region_name=region)
        res = elb_client.describe_instance_health(LoadBalancerName=elb_name)
        for instance in res['InstanceStates']:
            if instance['State'] == "OutOfService":
                result.append({
                    "instance_id": instance["InstanceId"],
                    "region": region,
                    "load_balancer_name": elb_name
                })
    # Check the health of every load balancer collected above. Errors here
    # propagate to the caller (the old `except ... raise e` was a no-op).
    for elb in elb_list:
        elb_client = handle.client('elb', region_name=elb["region"])
        res = elb_client.describe_instance_health(LoadBalancerName=elb["load_balancer_name"])
        for instance in res['InstanceStates']:
            if instance['State'] == "OutOfService":
                result.append({
                    "instance_id": instance["InstanceId"],
                    "region": elb["region"],
                    "load_balancer_name": elb["load_balancer_name"]
                })
    return (False, result) if result else (True, None)
================================================
FILE: AWS/legos/aws_get_unused_route53_health_checks/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_unused_route53_health_checks/__init__.py
================================================
================================================
FILE: AWS/legos/aws_get_unused_route53_health_checks/aws_get_unused_route53_health_checks.json
================================================
{
"action_title": "AWS get Unused Route53 Health Checks",
"action_description": "AWS get Unused Route53 Health Checks",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_unused_route53_health_checks",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_supports_poll": true,
"action_is_check": true,
"action_next_hop": ["10a363abaf49098a0376eae46a6bfac421e606952369fc6ea02768ad319dd0be"],
"action_next_hop_parameter_mapping": {"10a363abaf49098a0376eae46a6bfac421e606952369fc6ea02768ad319dd0be": {"name": "Delete Unused Route53 HealthChecks", "health_check_ids": "."}},
"action_categories": ["CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ROUTE53"]
}
================================================
FILE: AWS/legos/aws_get_unused_route53_health_checks/aws_get_unused_route53_health_checks.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    """Input for the 'AWS get Unused Route53 Health Checks' action."""
    # Optional: when empty, all hosted zones in the account are scanned.
    hosted_zone_id: Optional[str] = Field(
        default='',
        description='Used to filter the health checks for a specific hosted zone.',
        title='Hosted Zone ID',
    )
def aws_get_unused_route53_health_checks_printer(output):
    """Pretty-print the action output; do nothing for empty results."""
    if output is not None:
        pprint.pprint(output)
def aws_get_unused_route53_health_checks(handle, hosted_zone_id: str = "") -> Tuple:
    """aws_get_unused_route53_health_checks Returns a list of unused Route 53 health checks.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type hosted_zone_id: string
    :param hosted_zone_id: Optional. Used to filter the health checks for a specific
          hosted zone.

    :rtype: Tuple of (status, data): status is True when every health check is
        referenced by some record set, otherwise False with the list of unused
        health check IDs.
    """
    # The old `try: ... except Exception as e: raise e` wrapper was a no-op
    # that only rewrote the traceback; errors now propagate directly.
    route_client = handle.client('route53')
    health_checks = aws_get_paginator(route_client, "list_health_checks", "HealthChecks")
    if hosted_zone_id:
        hosted_zones = [{'Id': hosted_zone_id}]
    else:
        hosted_zones = aws_get_paginator(route_client, "list_hosted_zones", "HostedZones")
    # Collect every health check ID referenced by a resource record set.
    used_health_check_ids = set()
    for zone in hosted_zones:
        record_sets = aws_get_paginator(
            route_client,
            "list_resource_record_sets",
            "ResourceRecordSets",
            HostedZoneId=zone['Id']
        )
        for record_set in record_sets:
            if 'HealthCheckId' in record_set:
                used_health_check_ids.add(record_set['HealthCheckId'])
    # A health check not referenced by any record set is unused.
    result = [hc['Id'] for hc in health_checks if hc['Id'] not in used_health_check_ids]
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_get_users_with_old_access_keys/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_get_users_with_old_access_keys/__init__.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
================================================
FILE: AWS/legos/aws_get_users_with_old_access_keys/aws_get_users_with_old_access_keys.json
================================================
{
"action_title": "AWS Get IAM Users with Old Access Keys",
"action_description": "This Lego collects the access keys that have never been used or the access keys that have been used but are older than the threshold.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_get_users_with_old_access_keys",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_IAM"]
}
================================================
FILE: AWS/legos/aws_get_users_with_old_access_keys/aws_get_users_with_old_access_keys.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from datetime import datetime, timezone
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    """Input for the 'AWS Get IAM Users with Old Access Keys' action."""
    # Access keys unused for more than this many days are reported.
    threshold_in_days: int = Field(
        default = 120,
        title="Threshold (In days)",
        description=("(in days) The threshold to check the IAM user access "
                     "keys older than the threshold.")
    )
def aws_get_users_with_old_access_keys_printer(output):
    """Pretty-print the action output; do nothing for empty results."""
    if output is not None:
        pprint.pprint(output)
def aws_get_users_with_old_access_keys(handle, threshold_in_days: int = 120) -> List:
    """aws_get_users_with_old_access_keys lists all the IAM users whose access keys
    were never used, or were last used longer than threshold_in_days ago.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type threshold_in_days: int
    :param threshold_in_days: (in days) The threshold to check the IAM user
        access keys older than the threshold.

    :rtype: Result List of all IAM users with old or never-used access keys.
    """
    client = handle.client('iam')
    result = []
    try:
        response = aws_get_paginator(client, "list_users", "Users")
    except Exception as e:
        # Bug fix: list.append() returns None, so the previous
        # `return result.append({"error": e})` returned None instead of the
        # error list the annotation promises.
        result.append({"error": e})
        return result
    for user in response:
        try:
            # Get a list of the user's access keys.
            access_keys = client.list_access_keys(UserName=user['UserName'])
        except Exception:
            # Best-effort: skip users whose keys cannot be listed.
            continue
        for access_key in access_keys['AccessKeyMetadata']:
            iam_data = {}
            try:
                access_key_info = client.get_access_key_last_used(
                    AccessKeyId=access_key['AccessKeyId']
                )
            except Exception:
                continue
            if 'LastUsedDate' not in access_key_info['AccessKeyLastUsed']:
                # The key exists but has never been used.
                iam_data["access_key"] = access_key['AccessKeyId']
                iam_data["iam_user"] = user['UserName']
                iam_data["last_used_days_ago"] = 'Never Used'
                result.append(iam_data)
            else:
                # Get the last used date of the access key.
                last_used = access_key_info['AccessKeyLastUsed']['LastUsedDate']
                days_since_last_used = (datetime.now(timezone.utc) - last_used).days
                # Report keys last used more than threshold_in_days ago
                # (the old comment hard-coded "90 days").
                if days_since_last_used > threshold_in_days:
                    iam_data["access_key"] = access_key['AccessKeyId']
                    iam_data["iam_user"] = user['UserName']
                    iam_data["last_used_days_ago"] = days_since_last_used
                    result.append(iam_data)
    return result
================================================
FILE: AWS/legos/aws_launch_instance_from_ami/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_launch_instance_from_ami/__init__.py
================================================
================================================
FILE: AWS/legos/aws_launch_instance_from_ami/aws_launch_instance_from_ami.json
================================================
{
"action_title": "Launch AWS EC2 Instance From an AMI",
"action_description": "Use this instance to Launch an AWS EC2 instance from an AMI",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_launch_instance_from_ami",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2"]
}
================================================
FILE: AWS/legos/aws_launch_instance_from_ami/aws_launch_instance_from_ami.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input for the 'Launch AWS EC2 Instance From an AMI' action."""
    # AMI to launch the instance from.
    ami_id: str = Field(
        title='AMI Id',
        description='AMI Id.')
    # Region in which the instance is launched.
    region: str = Field(
        title='Region',
        description='AWS Region.')
def aws_launch_instance_from_ami_printer(output):
    """Pretty-print the action output; do nothing for empty results."""
    if output is not None:
        pprint.pprint(output)
def aws_launch_instance_from_ami(handle, ami_id: str, region: str) -> List:
    """aws_launch_instance_from_ami launches a single EC2 instance from the given image.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type ami_id: string
    :param ami_id: AMI Id Information required to launch an instance.

    :type region: string
    :param region: Region in which to launch the instance.

    :rtype: List describing the launched instance(s).
    """
    ec2_client = handle.client('ec2', region_name=region)
    launch_response = ec2_client.run_instances(ImageId=ami_id, MinCount=1, MaxCount=1)
    return launch_response['Instances']
================================================
FILE: AWS/legos/aws_list_access_keys/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_access_keys/__init__.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
================================================
FILE: AWS/legos/aws_list_access_keys/aws_list_access_keys.json
================================================
{
"action_title": "AWS List Access Key",
"action_description": "List all Access Keys for the User",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_access_keys",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_IAM"]
}
================================================
FILE: AWS/legos/aws_list_access_keys/aws_list_access_keys.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input for the 'AWS List Access Key' action."""
    # IAM user whose access keys are listed.
    aws_username: str = Field(
        title="Username",
        description="Username of the IAM User"
    )
def aws_list_access_keys_printer(output):
    """Pretty-print the action output; do nothing for empty results."""
    if output is not None:
        pprint.pprint(output)
def aws_list_access_keys(handle, aws_username: str) -> Dict:
    """aws_list_access_keys lists all the access keys for a user

    :type handle: object
    :param handle: Object returned from Task Validate

    :type aws_username: str
    :param aws_username: Username of the IAM user to be looked up

    :rtype: Dict with the IAM list_access_keys response for the user
    """
    iam_client = handle.client('iam')
    result = iam_client.list_access_keys(UserName=aws_username)
    # The previous key-by-key copy guarded by a "seen keys" list was dead
    # logic — dict keys are unique, so it amounted to a shallow copy.
    return dict(result)
================================================
FILE: AWS/legos/aws_list_all_iam_users/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_all_iam_users/__init__.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
================================================
FILE: AWS/legos/aws_list_all_iam_users/aws_list_all_iam_users.json
================================================
{
"action_title": "AWS List All IAM Users",
"action_description": "List all AWS IAM Users",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_all_iam_users",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["list"],
"action_nouns": ["users","iam","aws"],
"action_is_check": false,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_IAM"]
}
================================================
FILE: AWS/legos/aws_list_all_iam_users/aws_list_all_iam_users.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel
class InputSchema(BaseModel):
    """The 'AWS List All IAM Users' action takes no input parameters."""
    pass
def aws_list_all_iam_users_printer(output):
    """Pretty-print the action output; do nothing for empty results."""
    if output is not None:
        pprint.pprint(output)
def aws_list_all_iam_users(handle) -> List:
    """aws_list_all_iam_users lists all the IAM users

    :type handle: object
    :param handle: Object returned from Task Validate

    :rtype: Result List of all IAM user names
    """
    client = handle.client('iam')
    users_list = []
    try:
        # Use the boto3 paginator: a bare list_users() call returns at most
        # one page, silently truncating accounts with many users. The API
        # call is now also inside the try block it was meant to be guarded by.
        for page in client.get_paginator('list_users').paginate():
            for user in page['Users']:
                users_list.append(user['UserName'])
    except Exception as e:
        # NOTE(review): appending the exception object preserves the original
        # behavior; callers iterate this list as names, so consider re-raising.
        users_list.append(e)
    return users_list
================================================
FILE: AWS/legos/aws_list_all_regions/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_all_regions/__init__.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
================================================
FILE: AWS/legos/aws_list_all_regions/aws_list_all_regions.json
================================================
{
"action_title": "AWS List All Regions",
"action_description": "List all available AWS Regions",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_all_regions",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["list"],
"action_nouns": ["regions","aws"],
"action_is_check": false,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_list_all_regions/aws_list_all_regions.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
import pprint
from typing import List
from pydantic import BaseModel
class InputSchema(BaseModel):
    """The 'AWS List All Regions' action takes no input parameters."""
    pass
def aws_list_all_regions_printer(output):
    """Pretty-print the action output; do nothing for empty results."""
    if output is not None:
        pprint.pprint(output)
def aws_list_all_regions(handle) -> List:
    """aws_list_all_regions lists all the AWS regions

    :type handle: object
    :param handle: Object returned from Task Validate

    :rtype: List of region names; empty list when the CLI command fails
    """
    result = handle.aws_cli_command(
        "aws ec2 --region us-west-2 describe-regions --all-regions --query 'Regions[].{Name:RegionName}' --output text"
    )
    if result is None or result.returncode != 0:
        print(f"Error while executing command : {result}")
        # Bug fix: the annotated return type is List, but this path returned
        # str() — callers iterating the result would silently get nothing/chars.
        return []
    # One region name per line; drop blank lines from the trailing newline.
    return [region for region in result.stdout.split("\n") if region]
================================================
FILE: AWS/legos/aws_list_application_loadbalancers/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_application_loadbalancers/__init__.py
================================================
================================================
FILE: AWS/legos/aws_list_application_loadbalancers/aws_list_application_loadbalancers.json
================================================
{
"action_title": "AWS List Application LoadBalancers ARNs",
"action_description": "AWS List Application LoadBalancers ARNs",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_application_loadbalancers",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ELB"]
}
================================================
FILE: AWS/legos/aws_list_application_loadbalancers/aws_list_application_loadbalancers.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    """Input for the 'AWS List Application LoadBalancers ARNs' action."""
    # NOTE(review): the title/description say "Classic Loadbalancer" but this
    # action lists application load balancers (elbv2) — confirm wording.
    region: Optional[str] = Field(
        title='Region of the Classic Loadbalancer',
        description='Region of the Classic loadbalancer.'
    )
def aws_list_application_loadbalancers_printer(output):
    """Pretty-print the action output; do nothing for empty results."""
    if output is not None:
        pprint.pprint(output)
def aws_list_application_loadbalancers(handle, region: str) -> List:
    """aws_list_application_loadbalancers lists application loadbalancers ARNs.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: Region of the loadbalancer.

    :rtype: List with all the application loadbalancer ARNs
    """
    arns = []
    try:
        elb_client = handle.client('elbv2', region_name=region)
        load_balancers = aws_get_paginator(elb_client, "describe_load_balancers", "LoadBalancers")
        for load_balancer in load_balancers:
            # Only application LBs; elbv2 also reports network/gateway types.
            if load_balancer['Type'] == "application":
                arns.append(load_balancer['LoadBalancerArn'])
    except Exception:
        # Best-effort: return whatever was collected before a failure.
        pass
    return arns
================================================
FILE: AWS/legos/aws_list_attached_user_policies/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_attached_user_policies/__init__.py
================================================
================================================
FILE: AWS/legos/aws_list_attached_user_policies/aws_list_attached_user_policies.json
================================================
{
"action_title": "AWS List Attached User Policies",
"action_description": "AWS List Attached User Policies",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_attached_user_policies",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_IAM"]
}
================================================
FILE: AWS/legos/aws_list_attached_user_policies/aws_list_attached_user_policies.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from botocore.exceptions import ClientError
class InputSchema(BaseModel):
    """Input for the 'AWS List Attached User Policies' action."""
    # IAM user whose attached managed policies are listed.
    user_name: str = Field(
        title='User Name',
        description='IAM user whose policies need to fetched.')
def aws_list_attached_user_policies_printer(output):
    """Pretty-print the action output; do nothing for empty results."""
    if output is not None:
        pprint.pprint(output)
def aws_list_attached_user_policies(handle, user_name: str) -> List:
    """aws_list_attached_user_policies returns the list of policies attached to the user.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type user_name: string
    :param user_name: IAM user whose policies need to be fetched.

    :rtype: List with the attached policy names (or the error response on failure).
    """
    policy_names = []
    iam_client = handle.client('iam')
    try:
        attached = iam_client.list_attached_user_policies(UserName=user_name)
        policy_names = [policy['PolicyName'] for policy in attached["AttachedPolicies"]]
    except ClientError as error:
        # Preserve original behavior: surface the raw error response in the list.
        policy_names.append(error.response)
    return policy_names
================================================
FILE: AWS/legos/aws_list_clusters_with_low_utilization/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_clusters_with_low_utilization/__init__.py
================================================
================================================
FILE: AWS/legos/aws_list_clusters_with_low_utilization/aws_list_clusters_with_low_utilization.json
================================================
{
"action_title": "AWS List ECS Clusters with Low CPU Utilization",
"action_description": "This action searches for clusters that have low CPU utilization.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_clusters_with_low_utilization",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check":true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_next_hop": ["6ad946fb1afd19286a8e7771e0f8e5566e4fdd54e3e2473385b5ac8e206e0a49"],
"action_next_hop_parameter_mapping": {"6ad946fb1afd19286a8e7771e0f8e5566e4fdd54e3e2473385b5ac8e206e0a49": {"name": "Delete ECS Clusters with Low CPU Utilization", "region": ".[0].region", "cluster_names":"map(.cluster_name)"}},
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2","CATEGORY_TYPE_AWS_EBC" ]
}
================================================
FILE: AWS/legos/aws_list_clusters_with_low_utilization/aws_list_clusters_with_low_utilization.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    """Input for the 'AWS List ECS Clusters with Low CPU Utilization' action."""
    # Optional: when empty, all AWS regions are scanned.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region.')
    # Clusters whose CPU utilization is below this percentage are reported.
    threshold: Optional[int] = Field(
        default=10,
        title='Threshold (In percent)',
        description='Threshold to check for cpu utilization is less than threshold.')
def aws_list_clusters_with_low_utilization_printer(output):
    """Pretty-print the action output; do nothing for empty results."""
    if output is not None:
        pprint.pprint(output)
def aws_list_clusters_with_low_utilization(handle, region: str = "", threshold: int = 10) -> Tuple:
    """aws_list_clusters_with_low_utilization Returns an array of ecs clusters.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type region: string
    :param region: AWS Region. When empty, all regions are scanned.

    :type threshold: int
    :param threshold: (In percent) Threshold to check for cpu utilization
        is less than threshold.

    :rtype: Tuple of (status, data): status is True when no low-utilization
        cluster was found, otherwise False with the list of clusters.
    """
    low_usage_clusters = []
    regions_to_scan = [region] if region else aws_list_all_regions(handle)
    for reg in regions_to_scan:
        try:
            ecs_client = handle.client('ecs', region_name=reg)
            cluster_arns = aws_get_paginator(ecs_client, "list_clusters", "clusterArns")
            for arn in cluster_arns:
                details = ecs_client.describe_clusters(clusters=[arn])['clusters'][0]
                for stat in details['statistics']:
                    # Report the cluster when its CPU utilization statistic
                    # falls below the threshold.
                    if stat['name'] == 'CPUUtilization' and int(stat['value']) < threshold:
                        low_usage_clusters.append({
                            "cluster_name": arn.split('/')[1],
                            "region": reg
                        })
        except Exception:
            # Best-effort: skip regions that are disabled or inaccessible.
            pass
    if low_usage_clusters:
        return (False, low_usage_clusters)
    return (True, None)
================================================
FILE: AWS/legos/aws_list_expiring_access_keys/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_expiring_access_keys/__init__.py
================================================
================================================
FILE: AWS/legos/aws_list_expiring_access_keys/aws_list_expiring_access_keys.json
================================================
{
"action_title": "AWS List Expiring Access Keys",
"action_description": "List Expiring IAM User Access Keys",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_expiring_access_keys",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["list"],
"action_nouns": ["expiring","access","aws"],
"action_is_check": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_IAM"],
"action_next_hop": ["a79201f821993867e23dd9603ed7ef5123325353d717c566f902f7ca6e471f5c"],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: AWS/legos/aws_list_expiring_access_keys/aws_list_expiring_access_keys.py
================================================
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Tuple
import datetime
import dateutil.tz
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_iam_users.aws_list_all_iam_users import aws_list_all_iam_users
class InputSchema(BaseModel):
    """Input for the 'AWS List Expiring Access Keys' action."""
    # Access keys whose age is within this many days of the limit are reported.
    threshold_days: int = Field(
        default=90,
        title="Threshold Days",
        description="Threshold number(in days) to check for expiry. Eg: 30"
    )
def aws_list_expiring_access_keys_printer(output):
    """Pretty-print the action output; do nothing for empty results."""
    if output is not None:
        pprint.pprint(output)
def aws_list_expiring_access_keys(handle, threshold_days: int = 90)-> Tuple:
    """aws_list_expiring_access_keys returns all the IAM user access keys which are
    about to expire given a threshold number of days.
    (The previous docstring was a copy-paste from the ACM certificates action.)

    :type handle: object
    :param handle: Object returned from Task Validate

    :type threshold_days: int
    :param threshold_days: Threshold number of days to check for expiry. Eg: 30 -lists
        all access Keys which are expiring within 30 days

    :rtype: Status, List of expiring access keys and Error if any
    """
    result = []
    # The old try/`raise error` and try/`raise e` wrappers were no-ops;
    # errors now propagate directly to the caller.
    all_users = aws_list_all_iam_users(handle=handle)
    # Hoisted out of the loop: one IAM client serves every user lookup.
    iamClient = handle.client('iam')
    right_now = datetime.datetime.now(dateutil.tz.tzlocal())
    for each_user in all_users:
        response = iamClient.list_access_keys(UserName=each_user)
        for key_metadata in response["AccessKeyMetadata"]:
            key_age = right_now - key_metadata["CreateDate"]
            days_remaining = threshold_days - key_age.days
            # Keys whose age is within [0, threshold_days] of the limit are
            # considered "expiring".
            if 0 <= days_remaining <= threshold_days:
                result.append({
                    "username": key_metadata["UserName"],
                    "access_key_id": key_metadata["AccessKeyId"]
                })
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_list_expiring_acm_certificates/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_expiring_acm_certificates/__init__.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
================================================
FILE: AWS/legos/aws_list_expiring_acm_certificates/aws_list_expiring_acm_certificates.json
================================================
{
"action_title": "List Expiring ACM Certificates",
"action_description": "List All Expiring ACM Certificates",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_expiring_acm_certificates",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["list"],
"action_nouns": ["expiring","certificates","aws"],
"action_is_check": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ACM"],
"action_next_hop": ["76681732b20a69913f0d9248272271bf2f4ab6459498ec6d0ab055870e0db0bb"],
"action_next_hop_parameter_mapping": {"76681732b20a69913f0d9248272271bf2f4ab6459498ec6d0ab055870e0db0bb": {"name": "Renew AWS SSL Certificates that are close to expiration", "region": ".[0].region", "certificate_arns":".[0].certificates"}}
}
================================================
FILE: AWS/legos/aws_list_expiring_acm_certificates/aws_list_expiring_acm_certificates.py
================================================
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional,Tuple
import datetime
import dateutil
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    """Input for the 'List Expiring ACM Certificates' action."""
    # Certificates expiring within this many days are reported.
    threshold_days: int = Field(
        title="Threshold Days",
        description=("Threshold number(in days) to check for expiry. "
                     "Eg: 30 -lists all certificates which are expiring within 30 days")
    )
    # Optional: when empty, all AWS regions are scanned.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='Name of the AWS Region'
    )
def aws_list_expiring_acm_certificates_printer(output):
    """Pretty-print the action output; do nothing for empty results."""
    if output is not None:
        pprint.pprint(output)
def aws_list_expiring_acm_certificates(handle, threshold_days: int = 90, region: str=None)-> Tuple:
    """aws_list_expiring_acm_certificates returns all the ACM issued certificates which
       are about to expire given a threshold number of days

       :type handle: object
       :param handle: Object returned from Task Validate

       :type threshold_days: int
       :param threshold_days: Threshold number of days to check for expiry.
                              Eg: 30 -lists all certificates which are expiring within 30 days

       :type region: str
       :param region: Region name of the AWS account. When empty, every region is scanned.

       :rtype: Tuple of (status, list of {"region": ..., "certificate": [...]} dicts or None)
    """
    result_list = []
    all_regions = [region]
    if region is None or len(region) == 0:
        all_regions = aws_list_all_regions(handle=handle)
    # An aware "now"; subtracting it from the aware NotAfter yields the remaining days.
    # (stdlib equivalent of the previous dateutil local-tz timestamp — the
    # difference between two aware datetimes is tz-independent)
    right_now = datetime.datetime.now(datetime.timezone.utc)
    for r in all_regions:
        acm_client = handle.client('acm', region_name=r)
        try:
            # Reset per region: the original accumulated ARNs across regions,
            # re-describing earlier regions' certificates and attributing them
            # to whichever region was processed last.
            expiring_certificates_list = []
            certificates_list = acm_client.list_certificates(CertificateStatuses=['ISSUED'])
            for summary in certificates_list['CertificateSummaryList']:
                cert_arn = summary['CertificateArn']
                details = acm_client.describe_certificate(CertificateArn=cert_arn)
                expiry_date = details['Certificate'].get('NotAfter')
                if expiry_date is None:
                    continue
                days_remaining = (expiry_date - right_now).days
                if 0 < days_remaining < threshold_days:
                    expiring_certificates_list.append(cert_arn)
            if len(expiring_certificates_list) != 0:
                # NOTE(review): the action JSON's next-hop mapping reads
                # ".certificates" (plural) but this key has always been
                # "certificate" — confirm which side is intended before renaming.
                result_list.append({
                    "region": r,
                    "certificate": expiring_certificates_list,
                })
        except Exception:
            # Best-effort: skip regions where ACM is unavailable or unauthorized.
            pass
    if len(result_list) != 0:
        return (False, result_list)
    return (True, None)
================================================
FILE: AWS/legos/aws_list_hosted_zones/README.md
================================================
# AWS List Hosted Zones
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_hosted_zones/__init__.py
================================================
================================================
FILE: AWS/legos/aws_list_hosted_zones/aws_list_hosted_zones.json
================================================
{
"action_title": "AWS List Hosted Zones",
"action_description": "List all AWS Hosted zones",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_hosted_zones",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ROUTE53"]
}
================================================
FILE: AWS/legos/aws_list_hosted_zones/aws_list_hosted_zones.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel
class InputSchema(BaseModel):
    """Inputs for the 'AWS List Hosted Zones' action: none required."""
    pass
def aws_list_hosted_zones_printer(output):
    """Pretty-print the hosted-zone list; no-op when there is nothing to show."""
    if output is not None:
        pprint.pprint(output)
def aws_list_hosted_zones(handle) -> List:
    """aws_list_hosted_zones Returns all hosted zones.

       :type handle: object
       :param handle: Object returned by the task.validate(...) method.

       :rtype: List of dicts with the 'id' and 'name' of every hosted zone.
    """
    route53Client = handle.client('route53')
    result = []
    kwargs = {}
    while True:
        response = route53Client.list_hosted_zones(**kwargs)
        for hosted_zone in response['HostedZones']:
            result.append({
                'id': hosted_zone['Id'],
                'name': hosted_zone['Name']
            })
        # list_hosted_zones returns at most 100 zones per call; the original
        # code only ever fetched the first page. Keep paging while the
        # service reports more results.
        if not response.get('IsTruncated'):
            break
        kwargs['Marker'] = response['NextMarker']
    return result
================================================
FILE: AWS/legos/aws_list_unattached_elastic_ips/README.md
================================================
# AWS List Unattached Elastic IPs
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_unattached_elastic_ips/__init__.py
================================================
================================================
FILE: AWS/legos/aws_list_unattached_elastic_ips/aws_list_unattached_elastic_ips.json
================================================
{
"action_title": "AWS List Unattached Elastic IPs",
"action_description": "This action lists Elastic IP address and check if it is associated with an instance or network interface.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_unattached_elastic_ips",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check":true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2"],
"action_next_hop": ["a9d7ea5f3d31745f1de9fb8616ab6fbc20ff11e665808bdde6a9ba9b8b32e28a"],
"action_next_hop_parameter_mapping": {"a9d7ea5f3d31745f1de9fb8616ab6fbc20ff11e665808bdde6a9ba9b8b32e28a": {"name": "Release Unattached AWS Elastic IPs", "region": ".[0].region", "allocation_ids":"map(.allocation_id)"}}
}
================================================
FILE: AWS/legos/aws_list_unattached_elastic_ips/aws_list_unattached_elastic_ips.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
class InputSchema(BaseModel):
    """Inputs for the 'AWS List Unattached Elastic IPs' action."""
    # Optional region; when empty, the action scans every region.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region.')
def aws_list_unattached_elastic_ips_printer(output):
    """Pretty-print the unattached-EIP result; no-op for empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_list_unattached_elastic_ips(handle, region: str = "") -> Tuple:
    """aws_list_unattached_elastic_ips Returns an array of unattached elastic IPs.

       :type region: string
       :param region: AWS Region. When empty, every region is scanned.

       :rtype: Tuple with status result and list of unattached elastic IPs.
    """
    unattached = []
    regions_to_scan = [region] if region else aws_list_all_regions(handle)
    for current_region in regions_to_scan:
        try:
            ec2 = handle.client('ec2', region_name=current_region)
            for address in ec2.describe_addresses()["Addresses"]:
                # An address carrying an AssociationId is attached to an
                # instance or network interface — skip those.
                if 'AssociationId' in address:
                    continue
                unattached.append({
                    "public_ip": address['PublicIp'],
                    "allocation_id": address['AllocationId'],
                    "region": current_region,
                })
        except Exception:
            # Best-effort: regions we cannot query are silently skipped.
            pass
    if unattached:
        return (False, unattached)
    return (True, None)
================================================
FILE: AWS/legos/aws_list_unhealthy_instances_in_target_group/README.md
================================================
# AWS List Unhealthy Instances in a Target Group
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_unhealthy_instances_in_target_group/__init__.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
================================================
FILE: AWS/legos/aws_list_unhealthy_instances_in_target_group/aws_list_unhealthy_instances_in_target_group.json
================================================
{
"action_title": "AWS List Unhealthy Instances in a Target Group",
"action_description": "List Unhealthy Instances in a target group",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_unhealthy_instances_in_target_group",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["list"],
"action_nouns": ["unhealthy","instances","target","group","aws"],
"action_is_check": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_TROUBLESHOOTING", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ELB"],
"action_next_hop": ["7a5cf9629c56eb979a01977330c3d2df656e965a78323be4fa49fdc3b527c9d7"],
"action_next_hop_parameter_mapping": {"7a5cf9629c56eb979a01977330c3d2df656e965a78323be4fa49fdc3b527c9d7": {"name": "AWS Restart unhealthy services in a Target Group", "region": ".[0].regions", "instance_ids": "map(.instance)"}}
}
================================================
FILE: AWS/legos/aws_list_unhealthy_instances_in_target_group/aws_list_unhealthy_instances_in_target_group.py
================================================
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
from unskript.legos.utils import parseARN
class InputSchema(BaseModel):
    """Inputs for the 'AWS List Unhealthy Instances in a Target Group' action."""
    # Optional region; when empty, the action scans every region.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='Name of the AWS Region'
    )
def aws_list_unhealthy_instances_in_target_group_printer(output):
    """Pretty-print the unhealthy-instance result; no-op for empty output."""
    if output is not None:
        pprint.pprint(output)
def get_all_target_groups(handle, r):
    """Collect the ARNs of every target group in region ``r``.

       :type handle: object
       :param handle: Object returned from Task Validate

       :type r: str
       :param r: AWS region to query.

       :rtype: List of target group ARN strings; empty (or partial) on API errors.
    """
    target_arns_list = []
    elbv2Client = handle.client('elbv2', region_name=r)
    try:
        tbs = aws_get_paginator(elbv2Client, "describe_target_groups", "TargetGroups")
        # The previous enumerate() produced an unused index; only the entry is needed.
        for tb in tbs:
            target_arns_list.append(tb.get('TargetGroupArn'))
    except Exception:
        # Best-effort: return whatever was collected before the failure.
        pass
    return target_arns_list
def aws_list_unhealthy_instances_in_target_group(handle, region: str=None) -> Tuple:
    """aws_list_unhealthy_instances_in_target_group lists unhealthy instances per region.

       :type handle: object
       :param handle: Object returned from Task Validate

       :type region: str
       :param region: Name of the AWS Region. When empty, every region is scanned.

       :rtype: Tuple of (status, list of {"instance": [...], "region": ...} dicts or None)
    """
    all_regions = [region]
    if region is None or len(region) == 0:
        all_regions = aws_list_all_regions(handle=handle)
    all_target_groups = []
    for r in all_regions:
        try:
            target_groups = get_all_target_groups(handle, r)
            if len(target_groups) != 0:
                all_target_groups.append(target_groups)
        except Exception:
            pass
    # Aggregate unhealthy instance IDs per region. The original reused one
    # dict/list across iterations, so `result` held repeated references to a
    # single mutated dict: the last region overwrote all earlier ones and the
    # instance list accumulated IDs across regions.
    unhealthy_by_region = {}
    for target_group in all_target_groups:
        for tg_arn in target_group:
            region_name = parseARN(tg_arn)['region']
            elbv2Client = handle.client('elbv2', region_name=region_name)
            try:
                targetHealthResponse = elbv2Client.describe_target_health(TargetGroupArn=tg_arn)
            except Exception as e:
                print(f"An error occurred while describing target health: {e}") # Log an error message
                continue
            for ins in targetHealthResponse["TargetHealthDescriptions"]:
                if ins['TargetHealth']['State'] in ['unhealthy']:
                    unhealthy_by_region.setdefault(region_name, []).append(ins['Target']['Id'])
    result = [{'instance': ids, 'region': reg} for reg, ids in unhealthy_by_region.items()]
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_list_unused_secrets/README.md
================================================
# AWS List Unused Secrets
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_unused_secrets/__init__.py
================================================
================================================
FILE: AWS/legos/aws_list_unused_secrets/aws_list_unused_secrets.json
================================================
{
"action_title": "AWS List Unused Secrets",
"action_description": "This action lists all the unused secrets from AWS by comparing the last used date with the given threshold.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_unused_secrets",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check":true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_IAM", "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_COST_OPT"],
"action_next_hop": ["2a9101a1cf7be1cf70a30de2199dca5b302c3096"],
"action_next_hop_parameter_mapping": {"2a9101a1cf7be1cf70a30de2199dca5b302c3096": {"name": "Delete Unused AWS Secrets","region":".[0].region","secret_names":"map(.secret_name)"}}
}
================================================
FILE: AWS/legos/aws_list_unused_secrets/aws_list_unused_secrets.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Tuple
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from unskript.legos.aws.aws_list_all_regions.aws_list_all_regions import aws_list_all_regions
import pytz
class InputSchema(BaseModel):
    """Inputs for the 'AWS List Unused Secrets' action."""
    # Optional region; when empty, the action scans every region.
    region: Optional[str] = Field(
        default="",
        title='Region',
        description='AWS Region.')
    # Age threshold in days beyond which a secret counts as unused.
    max_age_days: Optional[int] = Field(
        default=30,
        title="Max Age Day's",
        description='The threshold to check the last use of the secret.')
def aws_list_unused_secrets_printer(output):
    """Pretty-print the unused-secrets result; no-op for empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_list_unused_secrets(handle, region: str = "", max_age_days: int = 30) -> Tuple:
    """aws_list_unused_secrets Returns an array of unused secrets.

       :type region: string
       :param region: AWS region. When empty, every region is scanned.

       :type max_age_days: int
       :param max_age_days: The threshold to check the last use of the secret.

       :rtype: Tuple with status result and list of unused secrets.
    """
    from datetime import timezone  # stdlib equivalent of pytz.UTC
    result = []
    all_regions = [region]
    if not region:
        all_regions = aws_list_all_regions(handle)
    # Compute the cutoff once instead of once per secret (loop-invariant).
    # Secrets whose last activity predates this instant are considered unused.
    cutoff = datetime.now(timezone.utc) - timedelta(days=int(max_age_days))
    for reg in all_regions:
        try:
            secrets_client = handle.client('secretsmanager', region_name=reg)
            secrets = aws_get_paginator(secrets_client, "list_secrets", "SecretList")
            for secret in secrets:
                secret_id = secret['Name']
                details = secrets_client.describe_secret(SecretId=secret_id)
                # Fall back to the creation date for secrets that were never
                # accessed. Previously a missing 'CreatedDate' raised KeyError
                # and silently aborted the whole region.
                last_activity = details.get('LastAccessedDate', details.get('CreatedDate'))
                if last_activity is not None and last_activity < cutoff:
                    result.append({"secret_name": secret_id, "region": reg})
        except Exception:
            # Best-effort: regions we cannot query are silently skipped.
            pass
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: AWS/legos/aws_list_users_with_old_passwords/README.md
================================================
# AWS List IAM Users With Old Passwords
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_list_users_with_old_passwords/__init__.py
================================================
================================================
FILE: AWS/legos/aws_list_users_with_old_passwords/aws_list_users_with_old_passwords.json
================================================
{
"action_title": "AWS List IAM Users With Old Passwords",
"action_description": "This Lego filter gets all the IAM users' login profiles, and if the login profile is available, checks for the last password change if the password is greater than the given threshold, and lists those users.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_list_users_with_old_passwords",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_is_check": true,
"action_next_hop":[],
"action_next_hop_parameter_mapping":{},
"action_categories": [ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_IAM"]
}
================================================
FILE: AWS/legos/aws_list_users_with_old_passwords/aws_list_users_with_old_passwords.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Tuple
from datetime import datetime, timezone, timedelta
from pydantic import BaseModel, Field
from dateutil.parser import parse
from unskript.connectors.aws import aws_get_paginator
class InputSchema(BaseModel):
    """Inputs for the 'AWS List IAM Users With Old Passwords' action."""
    # Password age in days beyond which a user is reported.
    threshold_days: int = Field(
        default = 120,
        title='Threshold (In days)',
        description=('(in days) The threshold to check the IAM user '
                     'password older than the threshold.')
    )
def aws_list_users_with_old_passwords_printer(output):
    """Pretty-print the stale-password user list; no-op for empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_list_users_with_old_passwords(handle, threshold_days: int = 120) -> Tuple:
    """aws_list_users_with_old_passwords lists all the IAM users with old passwords.

       :type handle: object
       :param handle: Object returned from Task Validate

       :type threshold_days: int
       :param threshold_days: (in days) The threshold to check the IAM user
                              password older than the threshold.

       :rtype: Tuple of (status, list of user names or None)
    """
    client = handle.client('iam')
    stale_users = []
    now = datetime.now(timezone.utc)
    max_age = timedelta(days=threshold_days)
    for user in aws_get_paginator(client, "list_users", "Users"):
        try:
            profile = client.get_login_profile(UserName=user['UserName'])
            if 'CreateDate' not in profile['LoginProfile']:
                continue
            # The login profile's CreateDate reflects the last password change.
            changed_at = parse(
                str(profile['LoginProfile']['CreateDate'])
            ).replace(tzinfo=timezone.utc)
            if now - changed_at > max_age:
                stale_users.append(user['UserName'])
        except Exception:
            # Presumably raised for users without a console login profile —
            # TODO confirm; such users are skipped either way.
            pass
    if len(stale_users) != 0:
        return (False, stale_users)
    return (True, None)
================================================
FILE: AWS/legos/aws_loadbalancer_list_instances/README.md
================================================
# AWS List Instances behind a Load Balancer
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_loadbalancer_list_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_loadbalancer_list_instances/aws_loadbalancer_list_instances.json
================================================
{
"action_title": "AWS List Instances behind a Load Balancer.",
"action_description": "List AWS Instances behind a Load Balancer",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_loadbalancer_list_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELB"]
}
================================================
FILE: AWS/legos/aws_loadbalancer_list_instances/aws_loadbalancer_list_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, List
from pydantic import BaseModel, Field
from unskript.legos.utils import parseARN
class InputSchema(BaseModel):
    """Inputs for the 'AWS List Instances behind a Load Balancer' action."""
    # Classic ELBs are identified by name; ALB/NLB by their ARN.
    arn: str = Field(
        title='Loadbalancer Name (Classic) or ARN (ALB/NLB)',
        description=('Name of the classic loadbalancer or ARN of the ALB/NLB. '
                     'Classic loadbalancer dont have ARN.')
    )
    # Only needed for Classic ELBs; ALB/NLB region comes from the ARN.
    region: Optional[str] = Field(
        title='Region of the Classic Loadbalancer',
        description='Region of the Classic loadbalancer. You dont need to fill this for ALB/NLB.'
    )
    classic: bool = Field(
        False,
        title='Classic Loadbalancer',
        description='Check if the loadbalancer is Classic. By default, its false.'
    )
def aws_loadbalancer_list_instances_printer(output):
    """Pretty-print the instance list; no-op for empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_loadbalancer_list_instances(
        handle,
        arn: str,
        region: str = None,
        classic: bool = False
    ) -> List:
    """aws_loadbalancer_list_instances returns the instances behind a load balancer.

       (The previous docstring named a different function,
       "aws_get_unhealthy_instances" — corrected.)

       :type handle: object
       :param handle: Object returned from task.validate(...).

       :type arn: string
       :param arn: Name of the classic loadbalancer or ARN of the ALB/NLB.

       :type classic: bool
       :param classic: True when the loadbalancer is a Classic ELB.

       :type region: string
       :param region: Region of the Classic loadbalancer; unused for ALB/NLB,
                      whose region is parsed from the ARN.

       :rtype: List of dicts with 'InstanceID' and 'PrivateIP' per instance.
    """
    instancesInfo = []
    try:
        if classic is False:
            # ALB/NLB path: derive the region from the ARN.
            parsedArn = parseARN(arn)
            elbv2Client = handle.client('elbv2', region_name=parsedArn['region'])
            ec2Client = handle.client('ec2', region_name=parsedArn['region'])
            # Get the list of target groups behind this LB.
            tgs = elbv2Client.describe_target_groups(
                LoadBalancerArn=arn
            )
            for tg in tgs['TargetGroups']:
                targetHealthResponse = elbv2Client.describe_target_health(
                    TargetGroupArn=tg['TargetGroupArn']
                )
                for ins in targetHealthResponse["TargetHealthDescriptions"]:
                    try:
                        privateIP = get_instance_private_ip(ec2Client, ins['Target']['Id'])
                    except Exception:
                        # Skip targets whose instance details cannot be fetched.
                        continue
                    instanceInfo = {
                        'InstanceID': ins['Target']['Id'],
                        'PrivateIP': privateIP
                    }
                    instancesInfo.append(instanceInfo)
        else:
            # Classic ELB path: identified by name, region supplied by the caller.
            elbClient = handle.client('elb', region_name=region)
            ec2Client = handle.client('ec2', region_name=region)
            res = elbClient.describe_instance_health(
                LoadBalancerName=arn
            )
            for ins in res['InstanceStates']:
                try:
                    privateIP = get_instance_private_ip(ec2Client, ins['InstanceId'])
                except Exception:
                    continue
                instanceInfo = {
                    'InstanceID': ins['InstanceId'],
                    'PrivateIP': privateIP
                }
                instancesInfo.append(instanceInfo)
    except Exception as e:
        print(f'Hit exception {str(e)}')
        raise e
    return instancesInfo

def get_instance_private_ip(ec2Client, instanceID: str) -> str:
    """Return the private IP address of the instance with the given ID.

       Propagates EC2 API errors; also raises IndexError/KeyError when the
       lookup returns no reservation or no private IP — callers above catch
       and skip those instances.
    """
    try:
        resp = ec2Client.describe_instances(
            Filters=[
                {
                    'Name': 'instance-id',
                    'Values': [instanceID]
                }
            ]
        )
    except Exception as e:
        print(f'Failed to get instance details for {instanceID}, err: {str(e)}')
        raise e
    return resp['Reservations'][0]['Instances'][0]['PrivateIpAddress']
================================================
FILE: AWS/legos/aws_make_bucket_public/README.md
================================================
# Make AWS Bucket Public
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_make_bucket_public/__init__.py
================================================
================================================
FILE: AWS/legos/aws_make_bucket_public/aws_make_bucket_public.json
================================================
{
"action_title": "Make AWS Bucket Public",
"action_description": "Make an AWS Bucket Public!",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_make_bucket_public",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3"]
}
================================================
FILE: AWS/legos/aws_make_bucket_public/aws_make_bucket_public.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the 'Make AWS Bucket Public' action."""
    name: str = Field(
        title='Bucket Name',
        description='Name of the bucket.')
    # When true the bucket ACL becomes public-read-write instead of public-read.
    enable_write: bool = Field(
        title='Enable write',
        description=('Set this to true, if you want the bucket to be publicly writeable as well. '
                     'By default, it is made publicly readable.')
    )
def aws_make_bucket_public_printer(output):
    """Pretty-print the S3 API response; no-op for empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_make_bucket_public(handle, name: str, enable_write: bool) -> Dict:
    """aws_make_bucket_public Makes bucket public.

       :type handle: object
       :param handle: Object returned from task.validate(...).

       :type name: string
       :param name: Name of the bucket.

       :type enable_write: bool
       :param enable_write: Set this to true for bucket to be publicly writeable.

       :rtype: Dict with information about the success of the request.
    """
    s3Client = handle.client('s3')
    # Read access is always granted; write access only when requested.
    acl = "public-read-write" if enable_write else "public-read"
    return s3Client.put_bucket_acl(Bucket=name, ACL=acl)
================================================
FILE: AWS/legos/aws_make_rds_instance_not_publicly_accessible/README.md
================================================
# Disallow AWS RDS Instance Public Accessibility
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_make_rds_instance_not_publicly_accessible/__init__.py
================================================
================================================
FILE: AWS/legos/aws_make_rds_instance_not_publicly_accessible/aws_make_rds_instance_not_publicly_accessible.json
================================================
{
"action_title": "Disallow AWS RDS Instance public accessibility",
"action_description": "Change public accessibility of RDS Instances to False.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_make_rds_instance_not_publicly_accessible",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":["CATEGORY_TYPE_CLOUDOPS","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS"]
}
================================================
FILE: AWS/legos/aws_make_rds_instance_not_publicly_accessible/aws_make_rds_instance_not_publicly_accessible.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the 'Disallow AWS RDS Instance public accessibility' action."""
    db_instance_identifier: str = Field(
        ...,
        description='The DB instance identifier for the DB instance to be deleted. This parameter isn’t case-sensitive.',
        title='RDS Instance Identifier',
    )
    region: str = Field(
        ..., description='AWS region of instance identifier', title='AWS Region'
    )
def aws_make_rds_instance_not_publicly_accessible_printer(output):
    """Print the action result; does nothing when output is None."""
    if output is not None:
        print(output)
def aws_make_rds_instance_not_publicly_accessible(handle, db_instance_identifier: str, region: str) -> str:
    """
    aws_make_rds_instance_not_publicly_accessible makes the specified RDS instance not publicly accessible.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type db_instance_identifier: string
    :param db_instance_identifier: Identifier of the RDS instance.

    :type region: string
    :param region: Region of the RDS instance.

    :rtype: Status message string. API errors propagate to the caller
            (the previous try/except merely re-raised them unchanged).
    """
    rdsClient = handle.client('rds', region_name=region)
    rdsClient.modify_db_instance(
        DBInstanceIdentifier=db_instance_identifier,
        PubliclyAccessible=False
    )
    # Typo fixed ("accessiblilty") and the placeholder-less f-string dropped.
    return "Public accessibility is being changed to False..."
================================================
FILE: AWS/legos/aws_modify_ebs_volume_to_gp3/README.md
================================================
# AWS Modify EBS Volume to GP3
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_modify_ebs_volume_to_gp3/__init__.py
================================================
================================================
FILE: AWS/legos/aws_modify_ebs_volume_to_gp3/aws_modify_ebs_volume_to_gp3.json
================================================
{
"action_title": "AWS Modify EBS Volume to GP3",
"action_description": "AWS recently introduced the General Purpose SSD (gp3) volume type, which is designed to provide higher IOPS performance at a lower cost than the gp2 volume type.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_modify_ebs_volume_to_gp3",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2"]
}
================================================
FILE: AWS/legos/aws_modify_ebs_volume_to_gp3/aws_modify_ebs_volume_to_gp3.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the 'AWS Modify EBS Volume to GP3' action."""
    region: str = Field(
        title='Region',
        description='AWS Region.')
    # Volume to be converted to the gp3 type.
    volume_id: str = Field(
        title='Volume ID',
        description='EBS Volume ID.')
def aws_modify_ebs_volume_to_gp3_printer(output):
    """Pretty-print the volume-modification result; no-op for empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_modify_ebs_volume_to_gp3(handle, region: str, volume_id: str) -> List:
    """aws_modify_ebs_volume_to_gp3 returns an array of modified details for EBS volumes.

       :type region: string
       :param region: Used to filter the volume for specific region.

       :type volume_id: string
       :param volume_id: EBS Volume ID.

       :rtype: List of modified details for EBS volumes
    """
    outcome = []
    try:
        ec2_client = handle.client('ec2', region_name=region)
        modification = ec2_client.modify_volume(VolumeId=volume_id, VolumeType='gp3')
        outcome.append(modification)
    except Exception as e:
        # Preserve original behavior: failures are returned, not raised.
        outcome.append({"error": e})
    return outcome
================================================
FILE: AWS/legos/aws_modify_listener_for_http_redirection/README.md
================================================
# AWS Modify ALB Listeners HTTP Redirection
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_modify_listener_for_http_redirection/__init__.py
================================================
================================================
FILE: AWS/legos/aws_modify_listener_for_http_redirection/aws_modify_listener_for_http_redirection.json
================================================
{
"action_title": "AWS Modify ALB Listeners HTTP Redirection",
"action_description": "AWS Modify ALB Listeners HTTP Redirection",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_modify_listener_for_http_redirection",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_remediation": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELB", "CATEGORY_TYPE_AWS_EC2"]
}
================================================
FILE: AWS/legos/aws_modify_listener_for_http_redirection/aws_modify_listener_for_http_redirection.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the 'AWS Modify ALB Listeners HTTP Redirection' action."""
    # Listener whose default action will become an HTTP->HTTPS redirect.
    listener_arn: str = Field(
        title='ListenerArn',
        description='listener ARNs.')
    region: str = Field(
        title='Region',
        description='AWS Region of the ALB listeners.')
def aws_modify_listener_for_http_redirection_printer(output):
    """Pretty-print the listener-modification result; no-op for empty output."""
    if output is not None:
        pprint.pprint(output)
def aws_modify_listener_for_http_redirection(handle, listener_arn: str, region: str) -> List:
    """aws_modify_listener_for_http_redirection List of Dict with modified listener info.

       :type handle: object
       :param handle: Object returned from task.validate(...).

       :type listener_arn: string
       :param listener_arn: List of listenerArn.

       :type region: string
       :param region: Region to filter ALB listeners.

       :rtype: List of Dict with modified ALB listeners info.
    """
    # Default action: HTTP 302 redirect to the same host/path/query over
    # HTTPS on port 443.
    redirect_action = [{
        "Type": "redirect",
        "Order": 1,
        "RedirectConfig": {
            "Protocol": "HTTPS",
            "Host": "#{host}",
            "Query": "#{query}",
            "Path": "/#{path}",
            "Port": "443",
            "StatusCode": "HTTP_302"}}]
    outcome = []
    try:
        elb_client = handle.client('elbv2', region_name=region)
        response = elb_client.modify_listener(ListenerArn=listener_arn,
                                              DefaultActions=redirect_action)
        outcome.append(response)
    except Exception as error:
        # Preserve original behavior: the exception object is returned in the list.
        outcome.append(error)
    return outcome
================================================
FILE: AWS/legos/aws_modify_public_db_snapshots/README.md
================================================
# AWS Modify Publicly Accessible RDS Snapshots
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_modify_public_db_snapshots/__init__.py
================================================
================================================
FILE: AWS/legos/aws_modify_public_db_snapshots/aws_modify_public_db_snapshots.json
================================================
{
"action_title": "AWS Modify Publicly Accessible RDS Snapshots",
"action_description": "AWS Modify Publicly Accessible RDS Snapshots",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_modify_public_db_snapshots",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_is_remediation": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS", "CATEGORY_TYPE_AWS_EC2"]
}
================================================
FILE: AWS/legos/aws_modify_public_db_snapshots/aws_modify_public_db_snapshots.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Typo fix: title/description previously read "Idntifier".
    db_snapshot_identifier: str = Field(
        title='DB Snapshot Identifier',
        description='DB Snapshot Identifier of RDS.'
    )
    region: str = Field(
        title='Region',
        description='Region of the RDS.'
    )
def aws_modify_public_db_snapshots_printer(output):
    """Pretty-print the action result; silently skip when there is no output."""
    if output is not None:
        pprint.pprint(output)
def aws_modify_public_db_snapshots(handle, db_snapshot_identifier: str, region: str) -> List:
    """aws_modify_public_db_snapshots revokes public access ('all') from the
    'restore' attribute of the given RDS DB snapshot.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type db_snapshot_identifier: string
    :param db_snapshot_identifier: DB Snapshot Identifier of RDS.
    :type region: string
    :param region: Region of the RDS.
    :rtype: List containing the API response on success, or the caught
            exception object on failure (best-effort, never raises).
    """
    # The client is for the RDS service — named accordingly (was 'ec2Client').
    rdsClient = handle.client('rds', region_name=region)
    result = []
    try:
        response = rdsClient.modify_db_snapshot_attribute(
            DBSnapshotIdentifier=db_snapshot_identifier,
            AttributeName='restore',
            ValuesToRemove=['all'])
        result.append(response)
    except Exception as error:
        # Preserve the original best-effort contract: return the error
        # instead of raising it.
        result.append(error)
    return result
================================================
FILE: AWS/legos/aws_postgresql_get_configured_max_connections/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_postgresql_get_configured_max_connections/__init__.py
================================================
================================================
FILE: AWS/legos/aws_postgresql_get_configured_max_connections/aws_postgresql_get_configured_max_connections.json
================================================
{
"action_title": "Get AWS Postgresql Max Configured Connections",
"action_description": "Get AWS Postgresql Max Configured Connections",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_postgresql_get_configured_max_connections",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2", "CATEGORY_TYPE_AWS_POSTGRES"]
}
================================================
FILE: AWS/legos/aws_postgresql_get_configured_max_connections/aws_postgresql_get_configured_max_connections.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Identifier of the RDS cluster whose max_connections is queried.
    cluster_identifier: str = Field(
        title='DB Identifier',
        description='RDS Cluster DB Identifier.')
    # AWS region hosting the cluster.
    region: str = Field(
        title='Region',
        description='AWS Region of the Postgres DB Cluster.')
def aws_postgresql_get_configured_max_connections_printer(output):
    """Pretty-print the action result; silently skip when there is no output."""
    if output is not None:
        pprint.pprint(output)
def aws_postgresql_get_configured_max_connections(
        handle,
        cluster_identifier: str,
        region: str
    ) -> str:
    """aws_postgresql_get_configured_max_connections gets the configured
    max_connections value of an RDS Postgres cluster.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type cluster_identifier: string
    :param cluster_identifier: RDS Cluster DB Identifier.
    :type region: string
    :param region: AWS Region of the Postgres DB Cluster.
    :rtype: max_connections as a string, or None when the parameter group
            does not define max_connections.
    """
    ec2_client = handle.client('ec2', region_name=region)
    # Build a map of EC2 instance type -> memory (MiB). RDS instance classes
    # are EC2 instance types with a 'db.' prefix.
    paginator = ec2_client.get_paginator('describe_instance_types')
    instance_type_memory_map = {}
    try:
        for page in paginator.paginate():
            for instance_type in page['InstanceTypes']:
                instance_type_memory_map[instance_type['InstanceType']] = \
                    instance_type['MemoryInfo']['SizeInMiB']
    except Exception as e:
        print(f'describe_instance_types hit an exception {str(e)}')
        raise e
    rds_client = handle.client('rds', region_name=region)
    try:
        describe_db_clusters_resp = rds_client.describe_db_clusters(
            DBClusterIdentifier=cluster_identifier
        )
    except Exception as e:
        print(f'describe_db_clusters for cluster {cluster_identifier} hit an exception, {str(e)}')
        raise e
    cluster_info = describe_db_clusters_resp['DBClusters'][0]
    cluster_parameter_group_name = cluster_info['DBClusterParameterGroup']
    cluster_instances = []
    for info in cluster_info['DBClusterMembers']:
        cluster_instances.append(info['DBInstanceIdentifier'])
    # ASSUMPTION: all nodes in the cluster are of the same instance type.
    try:
        describe_instance_resp = rds_client.describe_db_instances(
            DBInstanceIdentifier=cluster_instances[0]
        )
    except Exception as e:
        print(f'describe_db_instance for cluster {cluster_instances[0]} failed, {str(e)}')
        raise e
    # BUG FIX: lstrip('db.') strips the *character set* {'d','b','.'}, which
    # corrupts classes such as 'db.d3.xlarge' -> '3.xlarge'. Strip the
    # literal 'db.' prefix instead.
    instance_class = describe_instance_resp['DBInstances'][0]['DBInstanceClass']
    if instance_class.startswith('db.'):
        cluster_instance_type = instance_class[len('db.'):]
    else:
        cluster_instance_type = instance_class
    cluster_instance_memory = instance_type_memory_map[cluster_instance_type]
    # max_connections: 2 options here:
    # 1. If configured via the parameter group, return that value.
    # 2. If default, its LEAST({DBInstanceClassMemory/9531392}, 5000).
    paginator = rds_client.get_paginator('describe_db_parameters')
    operation_parameters = {'DBParameterGroupName': cluster_parameter_group_name}
    for page in paginator.paginate(**operation_parameters):
        for parameter in page['Parameters']:
            if parameter['ParameterName'] == 'max_connections':
                if parameter['ParameterValue'].startswith('LEAST'):
                    # Memory is reported in MiB; convert to bytes (×1048576)
                    # before applying the AWS default formula.
                    return str(int(min(cluster_instance_memory * 1048576 / 9531392, 5000)))
                return parameter['ParameterValue']
    # Parameter group does not define max_connections.
    return None
================================================
FILE: AWS/legos/aws_postgresql_plot_active_connections/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_postgresql_plot_active_connections/__init__.py
================================================
================================================
FILE: AWS/legos/aws_postgresql_plot_active_connections/aws_postgresql_plot_active_connections.json
================================================
{
"action_title": "Plot AWS PostgreSQL Active Connections",
"action_description": "Plot AWS PostgreSQL Action Connections",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_postgresql_plot_active_connections",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2", "CATEGORY_TYPE_AWS_POSTGRES"]
}
================================================
FILE: AWS/legos/aws_postgresql_plot_active_connections/aws_postgresql_plot_active_connections.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from datetime import datetime, timedelta
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt
class InputSchema(BaseModel):
    # Identifier of the RDS cluster whose connections are plotted.
    cluster_identifier: str = Field(
        title='DB Identifier',
        description='RDS DB Identifier.')
    # Denominator used to normalize the plotted connection counts.
    max_connections: int = Field(
        title='Max Connections',
        description='Configured max connections.')
    # Look-back window in seconds, ending now.
    time_since: int = Field(
        title='Time Since',
        description=('Starting from now, window (in seconds) for which you '
                     'want to get the datapoints for.')
    )
    region: str = Field(
        title='Region',
        description='AWS Region of the Postgres DB Cluster.')
def aws_postgresql_plot_active_connections(
        handle,
        cluster_identifier: str,
        max_connections: int,
        time_since: int,
        region: str
    ) -> None:
    """aws_postgresql_plot_active_connections plots, per cluster instance,
    the active connections normalized by the configured max connections.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type cluster_identifier: string
    :param cluster_identifier: RDS DB Identifier.
    :type max_connections: int
    :param max_connections: Configured max connections.
    :type time_since: int
    :param time_since: Starting from now, window (in seconds) for which
        you want to get the datapoints for.
    :type region: string
    :param region: AWS Region of the Postgres DB Cluster.
    :rtype: None; renders a matplotlib figure.
    """
    rds_client = handle.client('rds', region_name=region)
    try:
        describe_db_clusters_resp = rds_client.describe_db_clusters(
            DBClusterIdentifier=cluster_identifier
        )
    except Exception as e:
        print(f'describe_db_clusters for cluster {cluster_identifier} hit an exception, {str(e)}')
        raise e
    cluster_info = describe_db_clusters_resp['DBClusters'][0]
    # Collect the member instance IDs of the cluster.
    cluster_instances = [
        member['DBInstanceIdentifier'] for member in cluster_info['DBClusterMembers']
    ]
    cloud_watch_client = handle.client('cloudwatch', region_name=region)
    plt.figure(figsize=(10, 10))
    plt.ylabel('ActiveConnections/MaxConnections')
    # One line per instance, labelled by its identifier.
    for instance_id in cluster_instances:
        timestamps, values = get_normalized_active_connections(
            cloud_watch_client, instance_id, time_since, max_connections
        )
        plt.plot(timestamps, values, label=instance_id)
    plt.legend(loc=1, fontsize='medium')
    plt.show()
def get_normalized_active_connections(
        cloudWatch_client,
        db_instance_id,
        time_since,
        max_connections
    ):
    """Fetch DatabaseConnections averages for one DB instance and return
    (timestamps sorted ascending, matching averages / max_connections)."""
    res = cloudWatch_client.get_metric_statistics(
        Namespace="AWS/RDS",
        MetricName="DatabaseConnections",
        Dimensions=[{"Name": "DBInstanceIdentifier", "Value": db_instance_id}],
        Period=6000,
        StartTime=datetime.utcnow() - timedelta(seconds=time_since),
        EndTime=datetime.utcnow(),
        Statistics=[
            "Average"
        ]
    )
    # Normalize each datapoint by the configured max connections.
    normalized = {
        point['Timestamp']: point["Average"] / max_connections
        for point in res['Datapoints']
    }
    ordered_ts = sorted(normalized)
    return (ordered_ts, [normalized[ts] for ts in ordered_ts])
================================================
FILE: AWS/legos/aws_purchase_elasticcache_reserved_node/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_purchase_elasticcache_reserved_node/__init__.py
================================================
================================================
FILE: AWS/legos/aws_purchase_elasticcache_reserved_node/aws_purchase_elasticcache_reserved_node.json
================================================
{
"action_title": "AWS Purchase ElastiCache Reserved Nodes",
"action_description": "This action purchases a reserved cache node offering.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_purchase_elasticcache_reserved_node",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELASTICACHE"]
}
================================================
FILE: AWS/legos/aws_purchase_elasticcache_reserved_node/aws_purchase_elasticcache_reserved_node.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Dict
import pprint
class InputSchema(BaseModel):
    # AWS region in which to purchase the reserved node offering.
    region: str = Field(
        description='AWS Region.',
        title='Region'
    )
    reserved_node_offering_id: str = Field(
        description='The unique identifier of the reserved cache node offering you want to purchase.',
        title='Reserved Cache Node Offering ID',
    )
    # Defaults to purchasing a single node.
    no_of_nodes: Optional[int] = Field(
        1,
        description='The number of reserved cache nodes that you want to purchase.',
        title='No of nodes to purchase',
    )
def aws_purchase_elasticcache_reserved_node_printer(output):
    """Pretty-print the action result; silently skip when there is no output."""
    if output is not None:
        pprint.pprint(output)
def aws_purchase_elasticcache_reserved_node(handle, region: str, reserved_node_offering_id: str, no_of_nodes:int=1) -> Dict:
    """aws_purchase_elasticcache_reserved_node purchases a reserved cache node offering.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: AWS Region.
    :type reserved_node_offering_id: string
    :param reserved_node_offering_id: The unique identifier of the reserved node offering you want to purchase. Example: '438012d3-4052-4cc7-b2e3-8d3372e0e706'
    :type no_of_nodes: int
    :param no_of_nodes: The number of reserved nodes that you want to purchase.
    :rtype: dict of response metadata of purchasing a reserved node
    """
    try:
        elasticClient = handle.client('elasticache', region_name=region)
        params = {
            'ReservedCacheNodesOfferingId': reserved_node_offering_id,
            'CacheNodeCount': no_of_nodes
        }
        return elasticClient.purchase_reserved_cache_nodes_offering(**params)
    except Exception as e:
        # Chain the original exception so the root-cause traceback survives.
        raise Exception(e) from e
================================================
FILE: AWS/legos/aws_purchase_rds_reserved_instance/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_purchase_rds_reserved_instance/__init__.py
================================================
================================================
FILE: AWS/legos/aws_purchase_rds_reserved_instance/aws_purchase_rds_reserved_instance.json
================================================
{
"action_title": "AWS Purchase RDS Reserved Instances",
"action_description": "This action purchases a reserved DB instance offering.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_purchase_rds_reserved_instance",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_RDS"]
}
================================================
FILE: AWS/legos/aws_purchase_rds_reserved_instance/aws_purchase_rds_reserved_instance.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Dict
import pprint
class InputSchema(BaseModel):
    # AWS region in which to purchase the reserved DB instance offering.
    region: str = Field(
        description='AWS Region.',
        title='Region'
    )
    reserved_instance_offering_id: str = Field(
        description='The ID of the Reserved DB instance offering to purchase. Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706',
        title='Reserved Instance Offering ID',
    )
    # Defaults to reserving a single instance.
    db_instance_count: Optional[int] = Field(
        1,
        description='The number of instances to reserve.',
        title='Instance Count'
    )
def aws_purchase_rds_reserved_instance_printer(output):
    """Pretty-print the action result; silently skip when there is no output."""
    if output is not None:
        pprint.pprint(output)
def aws_purchase_rds_reserved_instance(handle, region: str, reserved_instance_offering_id: str, db_instance_count:int=1) -> Dict:
    """aws_purchase_rds_reserved_instance returns dict of response.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: AWS Region.
    :type reserved_instance_offering_id: string
    :param reserved_instance_offering_id: The unique identifier of the reserved instance offering you want to purchase.
    :type db_instance_count: int
    :param db_instance_count: The number of reserved instances that you want to purchase.
    :rtype: dict of response metadata of purchasing a reserved instance
    """
    try:
        # BUG FIX: purchase_reserved_db_instances_offering is an RDS API;
        # the client was previously created for the 'redshift' service.
        rdsClient = handle.client('rds', region_name=region)
        params = {
            'ReservedDBInstancesOfferingId': reserved_instance_offering_id,
            'DBInstanceCount': db_instance_count
        }
        return rdsClient.purchase_reserved_db_instances_offering(**params)
    except Exception as e:
        # Chain the original exception so the root-cause traceback survives.
        raise Exception(e) from e
================================================
FILE: AWS/legos/aws_purchase_redshift_reserved_node/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_purchase_redshift_reserved_node/__init__.py
================================================
================================================
FILE: AWS/legos/aws_purchase_redshift_reserved_node/aws_purchase_redshift_reserved_node.json
================================================
{
"action_title": "AWS Purchase Redshift Reserved Nodes",
"action_description": "This action purchases reserved nodes. Amazon Redshift offers a predefined set of reserved node offerings. You can purchase one or more of the offerings.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_purchase_redshift_reserved_node",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_REDSHIFT"]
}
================================================
FILE: AWS/legos/aws_purchase_redshift_reserved_node/aws_purchase_redshift_reserved_node.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # AWS region in which to purchase the reserved node offering.
    region: str = Field(
        description='AWS Region.',
        title='Region'
    )
    reserved_node_offering_id: str = Field(
        description='The unique identifier of the reserved node offering you want to purchase.',
        title='Reserved Node Offering ID',
    )
    # Typo fix: title previously read 'No od Nodes to reserve'.
    no_of_nodes: Optional[int] = Field(
        1,
        description='The number of reserved nodes that you want to purchase.',
        title='No of Nodes to reserve',
    )
def aws_purchase_redshift_reserved_node_printer(output):
    """Pretty-print the action result; silently skip when there is no output."""
    if output is not None:
        pprint.pprint(output)
def aws_purchase_redshift_reserved_node(handle, region: str, reserved_node_offering_id: str, no_of_nodes:int=1) -> Dict:
    """aws_purchase_redshift_reserved_node purchases a Redshift reserved node offering.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: AWS Region.
    :type reserved_node_offering_id: string
    :param reserved_node_offering_id: The unique identifier of the reserved node offering you want to purchase.
    :type no_of_nodes: int
    :param no_of_nodes: The number of reserved nodes that you want to purchase.
    :rtype: dict of response metadata of purchasing a reserved node
    """
    try:
        redshiftClient = handle.client('redshift', region_name=region)
        params = {
            'ReservedNodeOfferingId': reserved_node_offering_id,
            'NodeCount': no_of_nodes
        }
        return redshiftClient.purchase_reserved_node_offering(**params)
    except Exception as e:
        # Chain the original exception so the root-cause traceback survives.
        raise Exception(e) from e
================================================
FILE: AWS/legos/aws_put_bucket_cors/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_put_bucket_cors/__init__.py
================================================
================================================
FILE: AWS/legos/aws_put_bucket_cors/aws_put_bucket_cors.json
================================================
{
"action_title": " Apply CORS Policy for S3 Bucket",
"action_description": " Apply CORS Policy for S3 Bucket",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_put_bucket_cors",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3"]
}
================================================
FILE: AWS/legos/aws_put_bucket_cors/aws_put_bucket_cors.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
# @author: Yugal Pachpande, @email: yugal.pachpande@unskript.com
##
import pprint
from typing import Any, Dict, List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Target bucket that the CORS configuration is applied to.
    name: str = Field(
        title='Bucket name',
        description='Name of the bucket.'
    )
    # List of CORS rule dicts (AllowedHeaders / AllowedMethods /
    # AllowedOrigins / ExposeHeaders), as accepted by S3 put_bucket_cors.
    corsRules: List[Dict[str, Any]] = Field(
        title='Bucket Policy',
        description=('cross-origin access configuration in JSON format. '
                     'eg. [{\"AllowedHeaders\":["*"],\"AllowedMethods\":[\"PUT\",\"POST\",\"DELETE\"],'
                     '\"AllowedOrigins\":[\"http://www.example1.com\" ],\"ExposeHeaders\": []}, '
                     '{\"AllowedHeaders\": [],\"AllowedMethods\":[\"GET\"],\"AllowedOrigins\":[\"*\"],'
                     '\"ExposeHeaders\":[]}]')
    )
    # AWS region hosting the bucket.
    region: str = Field(
        title='Region',
        description='AWS region of the bucket.'
    )
def aws_put_bucket_cors_printer(output):
    """Pretty-print the action result; silently skip when there is no output."""
    if output is not None:
        pprint.pprint(output)
def aws_put_bucket_cors(handle, name: str, corsRules: List, region: str) -> Dict:
    """Apply a CORS configuration to an S3 bucket and return the raw response.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type name: string
    :param name: Name of the bucket.
    :type corsRules: list
    :param corsRules: cross-origin access configuration in JSON format.
    :type region: string
    :param region: AWS region of the bucket.
    :rtype: Dict with the response info.
    """
    s3Client = handle.client('s3', region_name=region)
    cors_configuration = {'CORSRules': corsRules}
    pprint.pprint(f"Applying config to bucket: {str(cors_configuration)}")
    return s3Client.put_bucket_cors(
        Bucket=name,
        CORSConfiguration=cors_configuration
    )
================================================
FILE: AWS/legos/aws_put_bucket_policy/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_put_bucket_policy/__init__.py
================================================
================================================
FILE: AWS/legos/aws_put_bucket_policy/aws_put_bucket_policy.json
================================================
{
"action_title": "Apply AWS New Policy for S3 Bucket",
"action_description": "Apply a New AWS Policy for S3 Bucket",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_put_bucket_policy",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3"]
}
================================================
FILE: AWS/legos/aws_put_bucket_policy/aws_put_bucket_policy.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
## @author: Amit Chandak, @email: amit@unskript.com
##
import pprint
from typing import Dict
import json
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Target bucket that the policy is applied to.
    name: str = Field(
        title='Bucket name',
        description='Name of the bucket.'
    )
    # Bucket policy document, supplied as a JSON string.
    policy: str = Field(
        title='Bucket Policy',
        description='Bucket policy in JSON format.'
    )
    # AWS region hosting the bucket.
    region: str = Field(
        title='Region',
        description='AWS region of the bucket.'
    )
def aws_put_bucket_policy_printer(output):
    """Pretty-print the action result; silently skip when there is no output."""
    if output is not None:
        pprint.pprint(output)
def aws_put_bucket_policy(handle, name: str, policy: str, region: str) -> Dict:
    """aws_put_bucket_policy Puts new policy for bucket.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type name: string
    :param name: Name of the bucket.
    :type policy: string
    :param policy: Bucket policy in JSON format.
    :type region: string
    :param region: AWS region of the bucket.
    :rtype: Dict with the response info.
    """
    s3Client = handle.client('s3',
                             region_name=region)
    # BUG FIX: json.dumps(policy) on a str double-encoded an already-JSON
    # policy into a quoted string that S3 rejects. Pass JSON strings through
    # unchanged; serialize any other (e.g. dict) input.
    policy_document = policy if isinstance(policy, str) else json.dumps(policy)
    res = s3Client.put_bucket_policy(
        Bucket=name,
        Policy=policy_document
    )
    return res['ResponseMetadata']
================================================
FILE: AWS/legos/aws_read_object/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_read_object/__init__.py
================================================
================================================
FILE: AWS/legos/aws_read_object/aws_read_object.json
================================================
{
"action_title": "Read AWS S3 Object",
"action_description": "Read an AWS S3 Object",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_read_object",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3"]
}
================================================
FILE: AWS/legos/aws_read_object/aws_read_object.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import io
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Bucket containing the object (or prefix) to read.
    name: str = Field(
        title='Bucket Name',
        description='Name of the bucket of the object.')
    # Object key, or a prefix ending in '/' to list keys instead.
    key: str = Field(
        title='Object Name',
        description=('Name of S3 object or Prefix. Prefix should end with / '
                     'to return the list of objects present in the bucket')
    )
def aws_read_object_printer(output):
    """Pretty-print the action result; silently skip when there is no output."""
    if output is not None:
        pprint.pprint(output)
def aws_read_object(handle, name: str, key: str) -> List:
    """Read an S3 object, or list keys when key is a prefix ending in '/'.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type name: string
    :param name: Name of the bucket of the object.
    :type key: string
    :param key: Name of S3 object or Prefix.
    :rtype: List with the object data.
    """
    s3Client = handle.client('s3')
    if not key.endswith("/"):
        # Single object: read up to the size limit and return its repr.
        res = s3Client.get_object(Bucket=name, Key=key)
        fileSizeLimit = 100000
        data = io.BytesIO(res['Body'].read()).read(fileSizeLimit)
        return [str(data)]
    # Prefix: list and echo the matching keys.
    folder_list = []
    res = s3Client.list_objects(Bucket=name, Prefix=key)
    print("\n")
    for content in res.get('Contents', []):
        print(content.get("Key"))
        folder_list.append(content.get("Key"))
    return folder_list
================================================
FILE: AWS/legos/aws_register_instances/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_register_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_register_instances/aws_register_instances.json
================================================
{
"action_title": " Register AWS Instances with a Load Balancer",
"action_description": " Register AWS Instances with a Load Balancer",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_register_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ELB"]
}
================================================
FILE: AWS/legos/aws_register_instances/aws_register_instances.py
================================================
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List, Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Name of the classic load balancer to register instances with.
    elb_name: str = Field(
        title='ELB Name',
        description='Name of the Load Balancer.')
    # EC2 instance IDs to register.
    instance_ids: List[str] = Field(
        title='Instance IDs',
        description='List of instance IDs. For eg. ["i-foo", "i-bar"]')
    # AWS region hosting the load balancer.
    region: str = Field(
        title='Region',
        description='AWS Region of the ELB.')
def aws_register_instances_printer(output):
    """Pretty-print the action result; silently skip when there is no output."""
    if output is not None:
        pprint.pprint(output)
def aws_register_instances(handle, elb_name: str, instance_ids: List, region: str) -> Dict:
    """Register the given EC2 instances with a classic load balancer.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type elb_name: string
    :param elb_name: Name of the Load Balancer.
    :type instance_ids: string
    :param instance_ids: List of instance IDs.
    :type region: string
    :param region: AWS Region of the ELB.
    :rtype: Dict of register info
    """
    elb_client = handle.client('elb', region_name=region)
    targets = [{'InstanceId': iid} for iid in instance_ids]
    return elb_client.register_instances_with_load_balancer(
        LoadBalancerName=elb_name,
        Instances=targets
    )
================================================
FILE: AWS/legos/aws_release_elastic_ip/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_release_elastic_ip/__init__.py
================================================
================================================
FILE: AWS/legos/aws_release_elastic_ip/aws_release_elastic_ip.json
================================================
{
"action_title": "AWS Release Elastic IP",
"action_description": "AWS Release Elastic IP for both VPC and Standard",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_release_elastic_ip",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_release_elastic_ip/aws_release_elastic_ip.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Allocation ID (e.g. eipalloc-...) of the Elastic IP to release.
    allocation_id: str = Field(
        title='Allocation ID',
        description='Allocation ID of the Elastic IP to release.')
    # AWS region holding the Elastic IP.
    region: str = Field(
        title='Region',
        description='AWS Region.')
def aws_release_elastic_ip_printer(output):
    """Pretty-print the action result; silently skip when there is no output."""
    if output is not None:
        pprint.pprint(output)
def aws_release_elastic_ip(handle, region: str, allocation_id: str) -> Dict:
    """Release the Elastic IP identified by its allocation ID.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type region: string
    :param region: AWS Region.
    :type allocation_id: string
    :param allocation_id: Allocation ID of the Elastic IP to release.
    :rtype: Dict with the release elastic ip info.
    """
    try:
        ec2_client = handle.client('ec2', region_name=region)
        return ec2_client.release_address(AllocationId=allocation_id)
    except Exception as e:
        raise Exception(e) from e
================================================
FILE: AWS/legos/aws_renew_expiring_acm_certificates/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_renew_expiring_acm_certificates/__init__.py
================================================
================================================
FILE: AWS/legos/aws_renew_expiring_acm_certificates/aws_renew_expiring_acm_certificates.json
================================================
{
"action_title": "Renew Expiring ACM Certificates",
"action_description": "Renew Expiring ACM Certificates",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_renew_expiring_acm_certificates",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["renew"],
"action_nouns": ["certificates","acm","aws"],
"action_is_check": false,
"action_is_remediation": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ACM"]
}
================================================
FILE: AWS/legos/aws_renew_expiring_acm_certificates/aws_renew_expiring_acm_certificates.py
================================================
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict, List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input schema for the aws_renew_expiring_acm_certificates action."""
    # List of ACM certificate ARNs to renew.
    aws_certificate_arn: List = Field(
        title="Certificate ARN",
        description="ARN of the Certificate"
    )
    # AWS region that hosts the certificates.
    region: str = Field(
        title='Region',
        description='Name of the AWS Region'
    )
def aws_renew_expiring_acm_certificates_printer(output):
    """Pretty-print the renewal results; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_renew_expiring_acm_certificates(handle, aws_certificate_arn: List, region: str='') -> Dict:
    """aws_renew_expiring_acm_certificates requests renewal of each given
    ACM certificate ARN.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type aws_certificate_arn: List
    :param aws_certificate_arn: List of ARNs of the certificates to renew.

    :type region: str
    :param region: Region name of the AWS account

    :rtype: Dict mapping each renewed certificate ARN to a success message, or
            containing an "error" key with the failure message if renewal fails.
    """
    result = {}
    try:
        acmClient = handle.client('acm', region_name=region)
        for arn in aws_certificate_arn:
            acmClient.renew_certificate(CertificateArn=arn)
            result[arn] = "Successfully renewed"
    except Exception as e:
        # Store the message, not the exception object, so the result stays serializable.
        result["error"] = str(e)
    return result
================================================
FILE: AWS/legos/aws_request_service_quota_increase/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_restart_ec2_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_restart_ec2_instances/aws_restart_ec2_instances.json
================================================
{
"action_title": "Restart AWS EC2 Instances",
"action_description": "Restart AWS EC2 Instances",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_restart_ec2_instances",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2"]
}
================================================
FILE: AWS/legos/aws_restart_ec2_instances/aws_restart_ec2_instances.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List, Dict
from pydantic import BaseModel, Field
from beartype import beartype
class InputSchema(BaseModel):
    """Input schema for the aws_restart_ec2_instances action."""
    # EC2 instance IDs to reboot.
    instance_ids: List[str] = Field(
        title='Instance IDs',
        description='List of instance IDs. For eg. ["i-foo", "i-bar"]')
    # AWS region hosting the instances.
    region: str = Field(
        title='Region',
        description='AWS Region of the instances.')
@beartype
def aws_restart_ec2_instances_printer(output):
    """Pretty-print the reboot response; skip printing when output is None."""
    if output is not None:
        pprint.pprint(output)
@beartype
def aws_restart_ec2_instances(handle, instance_ids: List, region: str) -> Dict:
    """aws_restart_ec2_instances Restarts instances.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.

    :type instance_ids: list
    :param instance_ids: List of instance ids.

    :type region: string
    :param region: Region for instance.

    :rtype: Dict with the restarted instances info.
    """
    client = handle.client('ec2', region_name=region)
    return client.reboot_instances(InstanceIds=instance_ids)
================================================
FILE: AWS/legos/aws_revoke_policy_from_iam_user/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_revoke_policy_from_iam_user/__init__.py
================================================
================================================
FILE: AWS/legos/aws_revoke_policy_from_iam_user/aws_revoke_policy_from_iam_user.json
================================================
{
"action_title": "AWS Revoke Policy from IAM User",
"action_description": "AWS Revoke Policy from IAM User",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_revoke_policy_from_iam_user",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": ["CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_revoke_policy_from_iam_user/aws_revoke_policy_from_iam_user.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input schema for the aws_revoke_policy_from_iam_user action."""
    # IAM user whose managed policy attachment is removed.
    user_name: str = Field(
        title='User Name',
        description='The name of the IAM user from whom to revoke the policy.')
    # ARN of the managed policy to detach.
    policy_arn: str = Field(
        title='Policy ARNs',
        description='The Amazon Resource Name (ARN) of the policy.')
def aws_revoke_policy_from_iam_user_printer(output):
    """Pretty-print the detach-policy response; None prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_revoke_policy_from_iam_user(handle, user_name: str, policy_arn: str) -> Dict:
    """aws_revoke_policy_from_iam_user revoke policy from iam user.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type user_name: str
    :param user_name: The name of the IAM user from whom to revoke the policy.

    :type policy_arn: str
    :param policy_arn: The Amazon Resource Name (ARN) of the policy.

    :rtype: Dict
    """
    try:
        iam_client = handle.client('iam')
        return iam_client.detach_user_policy(
            UserName=user_name,
            PolicyArn=policy_arn)
    except Exception as e:
        raise Exception(e) from e
================================================
FILE: AWS/legos/aws_run_instances/README.md
================================================
TBD
================================================
FILE: AWS/legos/aws_run_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_run_instances/aws_run_instances.json
================================================
{
"action_title": "Start AWS Instances",
"action_description": "Start an AWS EC2 Instances",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_run_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_verbs": [
"start"
],
"action_nouns": [
"aws",
"instances"
],
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2"]
}
================================================
FILE: AWS/legos/aws_run_instances/aws_run_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input schema for the aws_run_instances action."""
    # EC2 instance to start.
    instance_id: str = Field(
        title='Instance Id',
        description='ID of the instance to be run.')
    # AWS region hosting the instance.
    region: str = Field(
        title='Region',
        description='AWS Region of the instance.')
def aws_run_instances_printer(output):
    """Pretty-print the start-instances result; None prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_run_instances(handle, instance_id: str, region: str) -> Dict:
    """aws_run_instances starts a stopped AWS EC2 instance.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type instance_id: string
    :param instance_id: ID of the AWS EC2 instance to start.

    :type region: string
    :param region: AWS region for instance

    :rtype: Dict mapping each instance ID to its current (transitional) state info.
    """
    ec2Client = handle.client('ec2', region_name=region)
    res = ec2Client.start_instances(InstanceIds=[instance_id])
    # start_instances reports the transitional state of every instance it started.
    return {inst['InstanceId']: inst['CurrentState'] for inst in res['StartingInstances']}
================================================
FILE: AWS/legos/aws_schedule_pause_resume_enabled/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_schedule_pause_resume_enabled/__init__.py
================================================
================================================
FILE: AWS/legos/aws_schedule_pause_resume_enabled/aws_schedule_pause_resume_enabled.json
================================================
{
"action_title": "AWS Schedule Redshift Cluster Pause Resume Enabled",
"action_description": "AWS Schedule Redshift Cluster Pause Resume Enabled",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_schedule_pause_resume_enabled",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_schedule_pause_resume_enabled/aws_schedule_pause_resume_enabled.py
================================================
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input schema for the aws_schedule_pause_resume_enabled action."""
    # AWS region of the Redshift cluster.
    # NOTE(review): the description says "EBS volume" — looks copy-pasted; confirm.
    region: str = Field(
        title='Region',
        description='AWS Region of the EBS volume')
    # IAM role Redshift assumes to run the scheduled actions.
    iam_role_arn: str = Field(
        title='IAM Role',
        description='The ARN of the IAM role.')
    # Cluster to pause and resume on schedule.
    cluster_name: str = Field(
        title='Redshift Cluster Name',
        description='The name of the Redshift cluster.')
    # Schedule (cron expression) for the pause action.
    pause_schedule_expression: str = Field(
        title='Cron Expression for Pause',
        description='The cron expression for the pause schedule.')
    # Schedule (cron expression) for the resume action.
    resume_schedule_expression: str = Field(
        title='Cron Expression for Resume',
        description='The cron expression for the resume schedule.')
def aws_schedule_pause_resume_enabled_printer(output):
    """Pretty-print the scheduled-action responses; None prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_schedule_pause_resume_enabled(handle,
                                      iam_role_arn: str,
                                      cluster_name: str,
                                      region: str,
                                      pause_schedule_expression: str,
                                      resume_schedule_expression: str) -> List:
    """aws_schedule_pause_resume_enabled creates enabled scheduled pause and
    resume actions for a Redshift cluster.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type iam_role_arn: str
    :param iam_role_arn: The ARN of the IAM role.

    :type cluster_name: str
    :param cluster_name: The name of the Redshift cluster.

    :type region: str
    :param region: AWS Region.

    :type pause_schedule_expression: str
    :param pause_schedule_expression: The cron expression for the pause schedule.

    :type resume_schedule_expression: str
    :param resume_schedule_expression: The cron expression for the resume schedule.

    :rtype: List
    :return: A list with the create_scheduled_action responses (pause first, then resume).
    """
    result = []
    pause_action_name = f"{cluster_name}-scheduled-pause"
    resume_action_name = f"{cluster_name}-scheduled-resume"
    try:
        redshift_client = handle.client('redshift', region_name=region)
        # Schedule pause action
        response_pause = redshift_client.create_scheduled_action(
            ScheduledActionName=pause_action_name,
            TargetAction={
                'PauseCluster': {'ClusterIdentifier': cluster_name}
            },
            Schedule=pause_schedule_expression,
            IamRole=iam_role_arn,
            Enable=True
        )
        result.append(response_pause)
        # Schedule resume action
        response_resume = redshift_client.create_scheduled_action(
            ScheduledActionName=resume_action_name,
            TargetAction={
                'ResumeCluster': {'ClusterIdentifier': cluster_name}
            },
            Schedule=resume_schedule_expression,
            IamRole=iam_role_arn,
            Enable=True
        )
        result.append(response_resume)
    except Exception as error:
        # Chain the original exception so the traceback keeps its cause (pylint W0707),
        # consistent with the other actions in this repo.
        raise Exception(error) from error
    return result
================================================
FILE: AWS/legos/aws_send_email/README.md
================================================
## Try it Out
You can try this Action in the unSkript [Free Trial](https://us.app.unskript.io/), or using the [open source Docker image](http://runbooks.sh).
================================================
FILE: AWS/legos/aws_send_email/__init__.py
================================================
================================================
FILE: AWS/legos/aws_send_email/aws_send_email.json
================================================
{
"action_title": "AWS Send Email with SES",
"action_description": "This Action sends an Email with AWS Simple Email Service. Input the sender and recipient addresses, a subject and the body of the message (and the AWS region for SES), and your message will be sent.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_send_email",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_CLOUDOPS","CATEGORY_TYPE_AWS" ]
}
================================================
FILE: AWS/legos/aws_send_email/aws_send_email.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
# You must have the Sender email set up and
# verified in AWS SES for this action to work.
from pydantic import BaseModel, Field
from typing import Dict
import pprint
from botocore.exceptions import ClientError
class InputSchema(BaseModel):
    """Input schema for the aws_send_email action."""
    # Plain-text body of the email.
    Message: str = Field(
        ..., description='The body of the message to be sent.', title='Message'
    )
    # Recipient address.
    Receiver: str = Field(
        ..., description='Email address to receive the message.', title='Receiver'
    )
    # AWS region for the SES client.
    Region: str = Field(..., description='AWS Region', title='Region')
    # Sender address; must already be verified in SES (see header note).
    Sender: str = Field(
        ..., description='Email address sending the message.', title='Sender'
    )
    # Email subject line.
    Subject: str = Field(...,
        description='Subject line of the email.', title='Subject')
def aws_send_email_printer(output):
    """Pretty-print the SES response; None prints nothing."""
    if output is not None:
        pprint.pprint(output)
def aws_send_email(handle, Region: str, Sender: str, Receiver: str, Subject: str, Message: str) -> Dict:
    """aws_send_email sends a plain-text email through AWS SES.

    The Sender address must already be verified in SES.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type Region: str
    :param Region: AWS Region for the SES client.

    :type Sender: str
    :param Sender: Email address sending the message.

    :type Receiver: str
    :param Receiver: Email address to receive the message.

    :type Subject: str
    :param Subject: Subject line of the email.

    :type Message: str
    :param Message: The body of the message to be sent.

    :rtype: Dict with the SES send_email response.
    """
    client = handle.client('ses', region_name=Region)
    # Create the email message
    message = {
        'Subject': {
            'Data': Subject
        },
        'Body': {
            'Text': {
                'Data': Message
            }
        }
    }
    try:
        # Send the email
        response = client.send_email(
            Source=Sender,
            Destination={
                'ToAddresses': [Receiver]
            },
            Message=message
        )
    except ClientError as e:
        # Propagate the SES client error to the caller. (The original also
        # assigned the exception to `response` first — dead code, removed.)
        raise e
    # Print the response
    print(response)
    return response
================================================
FILE: AWS/legos/aws_service_quota_limits/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_service_quota_limits/__init__.py
================================================
================================================
FILE: AWS/legos/aws_service_quota_limits/aws_service_quota_limits.json
================================================
{
"action_title": "AWS Service Quota Limits",
"action_description": "Input a List of Service Quotas, and get back which of your instances are above the warning percentage of the quota",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_service_quota_limits",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS"]
}
================================================
FILE: AWS/legos/aws_service_quota_limits/aws_service_quota_limits.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from __future__ import annotations
import pprint
import json
from typing import List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from beartype import beartype
#here is a sample quota input:
#[{'QuotaName':'VPCs Per Region','ServiceCode':'vpc',
# 'QuotaCode': 'L-F678F1CE', 'ApiName': 'describe_vpcs',
# 'ApiFilter' : '[]','ApiParam': 'Vpcs', 'initialQuery': ''}]
# the values are described in a blog post:
# https://unskript.com/aws-service-quotas-discovering-where-you-stand/
class InputSchema(BaseModel):
    """Input schema for the aws_service_quota_limits action."""
    # AWS region to query quotas in.
    region: str = Field(..., description='AWS Region of the instances.', title='Region')
    # Usage percentage above which a quota is reported.
    warning_percentage: float = Field(
        50,
        description=('Threshold for alerting Service Quota. If set to 50, '
                     'any service at 50% of quota usage will be reported.'),
        title='warning_percentage',
    )
    # Quota descriptors; see the README for the expected dict format.
    # NOTE(review): the default is an empty string even though the declared
    # type is List — confirm callers always supply a value.
    quota_input: List = Field(
        '', description='Array of inputs - see readme for format', title='quota_input'
    )
@beartype
def aws_service_quota_limits_printer(output):
    """Pretty-print the quota results under an "Instances" key; None prints nothing."""
    if output is not None:
        pprint.pprint({"Instances": output})
@beartype
def aws_service_quota_limits(
        handle,
        region: str,
        warning_percentage: float,
        quota_input: List
    ) -> List:
    """aws_service_quota_limits compares current usage against AWS service quotas.

    For each entry in quota_input it fetches the quota value from Service Quotas,
    measures current usage through the matching EC2 describe_* API, and returns
    every quota whose usage percentage meets or exceeds warning_percentage.

    (The original stacked two identical @beartype decorators; one suffices.)

    :type handle: object
    :param handle: Object returned from Task Validate

    :type region: str
    :param region: AWS Region of the instances.

    :type warning_percentage: float
    :param warning_percentage: Threshold (0-100); quotas at or above it are reported.

    :type quota_input: List
    :param quota_input: List of quota descriptor dicts - see README for the format.

    :rtype: List of dicts with 'Quota Name', 'Limit', 'used' and 'percentage'.
    """
    sqClient = handle.client('service-quotas',region_name=region)
    ec2Client = handle.client('ec2', region_name=region)
    result = []
    for i in quota_input:
        #convert the ApiFilter to a list
        #'[{"Name": "vpc-endpoint-type","Values": ["Gateway"]}]'
        filterList=''
        if len(i.get('ApiFilter')) > 0:
            filterList = json.loads(i.get('ApiFilter'))
        #get quota
        sq = sqClient.get_service_quota(
            ServiceCode=i.get('ServiceCode'),
            QuotaCode=i.get('QuotaCode'))
        quotaValue =sq['Quota']['Value']
        #simple queries (Only one call to get the details)
        if i.get('initialQuery') == '':
            #find usage
            res = aws_get_paginator(
                ec2Client,
                i.get('ApiName'),
                i.get('ApiParam'),
                Filters=filterList
            )
            #most of the time, all we need is the length (else)
            if i.get('QuotaName')=="NAT gateways per Availability Zone":
                #sample exception to the else rule
                #count the subnets per nat gateway
                # Create a dictionary to store the count of NAT gateways for each Availability Zone
                az_nat_gateway_count = {}
                # Loop through each NAT gateway and count the number for each Availability Zone
                for nat_gateway in res:
                    az = nat_gateway['SubnetId']
                    if az in az_nat_gateway_count:
                        az_nat_gateway_count[az] += 1
                    else:
                        az_nat_gateway_count[az] = 1
                for gw , value in az_nat_gateway_count.items():
                    percentage = value/quotaValue
                    combinedData = {
                        'Quota Name': i.get('QuotaName') + ": "+ gw,
                        'Limit':quotaValue,
                        'used': value,
                        'percentage':percentage
                    }
                    result.append( combinedData)
            else:
                #most common default case
                count = len(res)
                percentage = count/quotaValue
                combinedData = {
                    'Quota Name': i.get('QuotaName'),
                    'Limit':quotaValue,
                    'used': count,
                    'percentage':percentage
                }
                result.append( combinedData)
        #nested queries (get X per VPC or get y per network interface)
        else:
            #nested query for quota
            #for example 'initialQuery': ['describe_vpcs','Vpcs', 'VpcId'] gets the list of VPCs,
            #that we can then ask about each VPC
            #turn initialQuery string into a list
            #'initialQuery': ['describe_vpcs','Vpcs', 'VpcId']
            initialQuery = json.loads(i.get('initialQuery'))
            initialQueryName = initialQuery[0]
            initialQueryParam = initialQuery[1]
            initialQueryFilter = initialQuery[2]
            #initial Query
            res = aws_get_paginator(ec2Client, initialQueryName, initialQueryParam)
            #nested query
            for j in res:
                #most of the time, there will be a 2nd query, and the table will have
                #an 'ApiName' value
                #rebuild filter
                variableReplace = j[initialQueryFilter]
                filterList = i.get('ApiFilter')
                filterList = filterList.replace("VARIABLE", variableReplace)
                filterList = json.loads(filterList)
                res2 = aws_get_paginator(
                    ec2Client,
                    i.get('ApiName'),
                    i.get('ApiParam'),
                    Filters=filterList
                )
                #most of the time we can just count the length of the response (else)
                if i.get('QuotaName') =="Participant accounts per VPC":
                    # NOTE(review): 'count' is not assigned in this branch, so the
                    # percentage below would reuse a stale value (or raise NameError
                    # on the first iteration) — confirm this quota is never passed in.
                    print("this is an exception, and you'll have to write custom code here")
                else:
                    count = len(res2)
                percentage = count/quotaValue
                quotaName = f"{i.get('QuotaName')} for {j[initialQueryFilter]}"
                combinedData = {
                    'Quota Name': quotaName,
                    'Limit':quotaValue,
                    'used': count,
                    'percentage':percentage
                }
                result.append(combinedData)
    # all the data is now in a list called result
    warning_result =[]
    threshold = warning_percentage/100
    for quota in result:
        if quota['percentage'] >= threshold:
            #there are two sums that appear, and throw errors.
            if quota['Quota Name'] != 'Inbound or outbound rules per security group':
                if quota['Quota Name'] != 'Security groups per network interface':
                    warning_result.append(quota)
    return warning_result
================================================
FILE: AWS/legos/aws_service_quota_limits_vpc/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_service_quota_limits_vpc/__init__.py
================================================
================================================
FILE: AWS/legos/aws_service_quota_limits_vpc/aws_service_quota_limits_vpc.json
================================================
{
"action_title": "AWS VPC service quota limit",
"action_description": "This Action queries all VPC Storage quotas, and returns all usage over warning_percentage.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_service_quota_limits_vpc",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_VPC"]
}
================================================
FILE: AWS/legos/aws_service_quota_limits_vpc/aws_service_quota_limits_vpc.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from __future__ import annotations
import pprint
from typing import List
import json
import datetime
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from beartype import beartype
class InputSchema(BaseModel):
    """Input schema for the aws_service_quota_limits_vpc action."""
    # AWS region whose VPC quotas are checked.
    region: str = Field(..., description='AWS Region.', title='Region')
    # Usage percentage above which a quota is reported; 0 lists everything.
    warning_percentage: float = Field(
        50,
        description='Percentage threshold for a warning. For a complete list of quotas, use 0.',
        title='warning_percentage',
    )
@beartype
def aws_service_quota_limits_vpc_printer(output):
    """Pretty-print the VPC quota results under an "Instances" key; None prints nothing."""
    if output is not None:
        pprint.pprint({"Instances": output})
@beartype
def aws_service_quota_limits_vpc(handle, region: str, warning_percentage: float) -> List:
    """aws_service_quota_limits_vpc checks VPC-related service quotas for a region.

    For every quota in a built-in table it fetches the quota value from Service
    Quotas, measures current usage through the matching EC2 describe_* API, and
    returns each quota whose usage percentage meets or exceeds warning_percentage.

    Fixes vs. the original: single @beartype decorator (it was duplicated);
    datetime usage qualified (`datetime.now(...)` on the module raised
    AttributeError); the expiry-hours branch now converts the remaining time to
    hours so 'percentage' is a float, not a timedelta.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type region: str
    :param region: AWS Region.

    :type warning_percentage: float
    :param warning_percentage: Percentage threshold (0-100); use 0 for a complete list.

    :rtype: List of dicts with 'Quota Name', 'Limit', 'used' and 'percentage'.
    """
    ## EC2 and VPCs
    ec2Client = handle.client('ec2', region_name=region)
    # List all VPCs in the specified region
    q_table = [
        #per region stats
        {
            'QuotaName':'VPCs Per Region',
            'ServiceCode':'vpc',
            'QuotaCode':'L-F678F1CE',
            'ApiName': 'describe_vpcs',
            'ApiFilter' : '[]',
            'ApiParam': 'Vpcs',
            'initialQuery': ''
        },
        {
            'QuotaName':'VPC security groups per Region',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-E79EC296',
            'ApiName': 'describe_security_groups',
            'ApiFilter' :'[]',
            'ApiParam': 'SecurityGroups',
            'initialQuery': ''
        },
        {
            'QuotaName':'Egress-only internet gateways per Region',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-45FE3B85',
            'ApiName': 'describe_egress_only_internet_gateways',
            'ApiFilter' : '[]',
            'ApiParam': 'EgressOnlyInternetGateways',
            'initialQuery': ''
        },
        {
            'QuotaName':'Gateway VPC endpoints per Region',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-1B52E74A',
            'ApiName': 'describe_vpc_endpoints',
            'ApiFilter' : '[{"Name": "vpc-endpoint-type","Values": ["Gateway"]}]',
            'ApiParam': 'VpcEndpoints',
            'initialQuery': ''
        },
        {
            'QuotaName':'Internet gateways per Region',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-A4707A72',
            'ApiName': 'describe_internet_gateways',
            'ApiFilter' : '[]',
            'ApiParam': 'InternetGateways',
            'initialQuery': ''
        },
        {
            'QuotaName':'Network interfaces per Region',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-DF5E4CA3',
            'ApiName': 'describe_network_interfaces',
            'ApiFilter' : '[]',
            'ApiParam': 'NetworkInterfaces',
            'initialQuery': ''
        },
        #per VPC stats
        {
            'QuotaName':'Active VPC peering connections per VPC',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-7E9ECCDB',
            'ApiName': 'describe_vpc_peering_connections',
            'ApiFilter' : '[{"Name": "status-code","Values": ["active"]}, {"Name": "requester-vpc-info.vpc-id","Values": ["VARIABLE"]}]',
            'ApiParam': 'VpcPeeringConnections',
            'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
        },
        {
            'QuotaName':'Interface VPC endpoints per VPC',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-29B6F2EB',
            'ApiName': 'describe_vpc_endpoints',
            'ApiFilter' : '[{"Name": "vpc-endpoint-type","Values": ["Interface"]}, {"Name": "vpc-id","Values": ["VARIABLE"]}]',
            'ApiParam': 'VpcEndpoints',
            'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
        },
        {
            'QuotaName':'IPv4 CIDR blocks per VPC',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-83CA0A9D',
            'ApiName': '',
            'ApiFilter': '',
            'ApiParam': 'CidrBlockAssociationSet',
            'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
        },
        {
            'QuotaName':' Network ACLs per VPC',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-B4A6D682',
            'ApiName': 'describe_network_acls',
            'ApiFilter': '[{"Name": "vpc-id","Values": ["VARIABLE"]}]',
            'ApiParam': 'NetworkAcls',
            'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
        },
        {
            'QuotaName':'Participant accounts per VPC',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-2C462E13',
            'ApiName': 'describe_vpc_peering_connections',
            'ApiFilter': '[{"Name": "requester-vpc-info.vpc-id","Values": ["VARIABLE"]}]',
            'ApiParam': 'VpcPeeringConnections',
            'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
        },
        {
            'QuotaName':'Route tables per VPC',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-589F43AA',
            'ApiName': 'describe_route_tables',
            'ApiFilter': '[{"Name": "vpc-id","Values": ["VARIABLE"]}]',
            'ApiParam': 'RouteTables',
            'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
        },
        {
            'QuotaName':'Subnets per VPC',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-407747CB',
            'ApiName': 'describe_subnets',
            'ApiFilter': '[{"Name": "vpc-id","Values": ["VARIABLE"]}]',
            'ApiParam': 'Subnets',
            'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
        },
        {
            'QuotaName':'NAT gateways per Availability Zone',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-FE5A380F',
            'ApiName': 'describe_nat_gateways',
            'ApiFilter': '[]',
            'ApiParam': 'NatGateways',
            'initialQuery': ''
        },
        {
            'QuotaName':'Inbound or outbound rules per security group',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-0EA8095F',
            'ApiName': 'describe_security_groups',
            'ApiFilter': '[]',
            'ApiParam': 'SecurityGroups',
            'initialQuery': ''
        },
        {
            'QuotaName':'Outstanding VPC peering connection requests',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-DC9F7029',
            'ApiName': 'describe_vpc_peering_connections',
            'ApiFilter': '[{"Name": "status-code", "Values": ["pending-acceptance"]}]',
            'ApiParam': 'VpcPeeringConnections',
            'initialQuery': ''
        },
        {
            'QuotaName':'Routes per route table',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-93826ACB',
            'ApiName': 'describe_route_tables',
            'ApiFilter': '[]',
            'ApiParam': 'RouteTables',
            'initialQuery': ''
        },
        {
            'QuotaName':'Rules per network ACL',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-2AEEBF1A',
            'ApiName': 'describe_network_acls',
            'ApiFilter': '[]',
            'ApiParam': 'NetworkAcls',
            'initialQuery': ''
        },
        {
            'QuotaName':'Security groups per network interface',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-2AFB9258',
            'ApiName': 'describe_network_interfaces',
            'ApiFilter': '[]',
            'ApiParam': 'NetworkInterfaces',
            'initialQuery': ''
        },
        {
            'QuotaName':'VPC peering connection request expiry hours',
            'ServiceCode':'vpc',
            'QuotaCode': 'L-8312C5BB',
            'ApiName': 'describe_vpc_peering_connections',
            'ApiFilter': '[{"Name": "expiration-time"}]',
            'ApiParam': 'VpcPeeringConnections',
            'initialQuery': ''
        }
    ]
    result = []
    sqClient = handle.client('service-quotas',region_name=region)
    for i in q_table:
        #convert the ApiFilter to a list
        #'[{"Name": "vpc-endpoint-type","Values": ["Gateway"]}]'
        filterList=''
        if len(i.get('ApiFilter')) > 0:
            filterList = json.loads(i.get('ApiFilter'))
        #get quota
        sq = sqClient.get_service_quota(
            ServiceCode=i.get('ServiceCode'),
            QuotaCode=i.get('QuotaCode'))
        quotaValue =sq['Quota']['Value']
        #simple queries (Only one call to get the details)
        if i.get('initialQuery') == '':
            #find usage
            res = aws_get_paginator(
                ec2Client,
                i.get('ApiName'),
                i.get('ApiParam'),
                Filters=filterList
            )
            #most of the time, all we need is the length (else)
            if i.get('QuotaName')=="NAT gateways per Availability Zone":
                #count the subnets per nat gateway
                # Create a dictionary to store the count of NAT gateways for each Availability Zone
                az_nat_gateway_count = {}
                # Loop through each NAT gateway and count the number for each Availability Zone
                for nat_gateway in res:
                    az = nat_gateway['SubnetId']
                    if az in az_nat_gateway_count:
                        az_nat_gateway_count[az] += 1
                    else:
                        az_nat_gateway_count[az] = 1
                for gw, value in az_nat_gateway_count.items():
                    percentage = value/quotaValue
                    combinedData = {
                        'Quota Name': i.get('QuotaName') + ": "+ gw ,
                        'Limit':quotaValue,
                        'used': value,
                        'percentage':percentage
                    }
                    result.append( combinedData)
            if i.get('QuotaName')=="Inbound or outbound rules per security group":
                for security_group in res:
                    ruleCount = len(security_group['IpPermissions']) +len(security_group['IpPermissionsEgress'])
                    percentage = ruleCount/quotaValue
                    if len(i.get('QuotaName'))>0:
                        combinedData = {
                            'Quota Name': i.get('QuotaName') +": "+ security_group['GroupName'],
                            'Limit':quotaValue,
                            'used': ruleCount,
                            'percentage':percentage
                        }
                        result.append(combinedData)
            if i.get('QuotaName')=="Routes per route table":
                for route_table in res:
                    route_count = len(route_table['Routes'])
                    route_table_id = route_table['RouteTableId']
                    percentage = route_count/quotaValue
                    combinedData = {
                        'Quota Name': i.get('QuotaName') +": "+ route_table_id ,
                        'Limit':quotaValue,
                        'used': route_count,
                        'percentage':percentage
                    }
                    result.append( combinedData)
            if i.get('QuotaName')=="Rules per network ACL":
                for network_acl in res:
                    rule_count = len(network_acl['Entries'])
                    network_acl_id = network_acl['NetworkAclId']
                    percentage = rule_count/quotaValue
                    combinedData = {
                        'Quota Name': i.get('QuotaName') +": "+ network_acl_id ,
                        'Limit':quotaValue,
                        'used': rule_count,
                        'percentage':percentage
                    }
                    result.append( combinedData)
            if i.get('QuotaName')=="Security groups per network interface":
                for network_interface in res:
                    security_group_count = len(network_interface['Groups'])
                    network_interface_id = network_interface['NetworkInterfaceId']
                    percentage = security_group_count/quotaValue
                    if len(i.get('QuotaName'))>0:
                        combinedData = {
                            'Quota Name': i.get('QuotaName') +": "+ network_interface_id ,
                            'Limit':quotaValue,
                            'used': security_group_count,
                            'percentage':percentage
                        }
                        result.append(combinedData)
            if i.get('QuotaName')=="VPC peering connection request expiry hours":
                if len(res)>0:
                    for peering_connection in res:
                        expiration_time = peering_connection['ExpirationTime']
                        # 'datetime' is imported as a module, so the class must be
                        # qualified — the original datetime.now(...) raised AttributeError.
                        current_time = datetime.datetime.now(datetime.timezone.utc)
                        time_remaining = expiration_time - current_time
                        peering_connection_id = peering_connection['VpcPeeringConnectionId']
                        # The quota is expressed in hours, so convert the timedelta to
                        # hours before dividing; otherwise 'percentage' would be a
                        # timedelta and break the >= threshold comparison below.
                        hours_remaining = time_remaining.total_seconds() / 3600
                        percentage = hours_remaining/quotaValue
                        combinedData = {
                            'Quota Name': i.get('QuotaName') +": "+ peering_connection_id ,
                            'Limit':quotaValue,
                            'used': hours_remaining,
                            'percentage':percentage
                        }
                        result.append(combinedData)
            else:
                #most common default case
                count = len(res)
                percentage = count/quotaValue
                combinedData = {
                    'Quota Name': i.get('QuotaName'),
                    'Limit':quotaValue,
                    'used': count,
                    'percentage':percentage
                }
                result.append( combinedData)
        #nested queries (get X per VPC or get y per network interface)
        else:
            #nested query for quota
            #for example 'initialQuery': ['describe_vpcs','Vpcs', 'VpcId'] gets the list of VPCs,
            # that we can then ask about each VPC
            #turn initialQuery string into a list
            #'initialQuery': ['describe_vpcs','Vpcs', 'VpcId']
            initialQuery = json.loads(i.get('initialQuery'))
            initialQueryName = initialQuery[0]
            initialQueryParam = initialQuery[1]
            initialQueryFilter = initialQuery[2]
            #initial Query
            res = aws_get_paginator(ec2Client, initialQueryName, initialQueryParam)
            #nested query
            for j in res:
                #most of the time, there will be a 2nd query, and the table will have
                #an 'ApiName' value
                if len(i.get('ApiName')) >0:
                    #rebuild filter
                    variableReplace = j[initialQueryFilter]
                    filterList = i.get('ApiFilter')
                    filterList = filterList.replace("VARIABLE", variableReplace)
                    filterList = json.loads(filterList)
                    res2 = aws_get_paginator(
                        ec2Client,
                        i.get('ApiName'),
                        i.get('ApiParam'),
                        Filters=filterList
                    )
                    #most of the time we can just count the length of the response (else)
                    if i.get('QuotaName') =="Participant accounts per VPC":
                        count =0
                        #there can be zero peering connections....
                        if len(res2) >0:
                            for connection in res2:
                                if len(connection['AccepterVpcInfo']['OwnerId']) >0:
                                    count += 1
                    else:
                        count = len(res2)
                else:
                    #the value is in the first query, but we need to loop through it
                    apiParam = i.get('ApiParam')
                    count = len(j[apiParam])
                percentage = count/quotaValue
                quotaName = f"{i.get('QuotaName')} for {j[initialQueryFilter]}"
                combinedData = {
                    'Quota Name': quotaName,
                    'Limit':quotaValue,
                    'used': count,
                    'percentage':percentage
                }
                result.append(combinedData)
    # all the data is now in a list called result
    warning_result =[]
    threshold = warning_percentage/100
    for quota in result:
        if quota['percentage'] >= threshold:
            #there are two sums that appear, and throw errors.
            if quota['Quota Name'] != 'Inbound or outbound rules per security group':
                if quota['Quota Name'] != 'Security groups per network interface':
                    warning_result.append(quota)
    return warning_result
================================================
FILE: AWS/legos/aws_stop_instances/README.md
================================================
Stop a running AWS EC2 instance given its instance ID and region; returns the resulting instance state per instance ID.
================================================
FILE: AWS/legos/aws_stop_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_stop_instances/aws_stop_instances.json
================================================
{
"action_title": "Stop AWS Instances",
"action_description": "Stop an AWS Instance",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_stop_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_verbs": [
"stop"
],
"action_nouns": [
"aws",
"instances"
],
"action_is_remediation": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2"]
}
================================================
FILE: AWS/legos/aws_stop_instances/aws_stop_instances.py
================================================
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for the Stop AWS Instances action."""
    # ID of the EC2 instance to stop.
    instance_id: str = Field(
        title='Instance Id',
        description='ID of the instance to be stopped.')
    # AWS region the instance lives in.
    region: str = Field(
        title='Region',
        description='AWS Region of the instance.')
def aws_stop_instances_printer(output):
    """Pretty-print the result of aws_stop_instances; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_stop_instances(handle, instance_id: str, region: str) -> Dict:
    """aws_stop_instances Stops instances.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type instance_id: string
    :param instance_id: ID of the AWS EC2 instance to stop.

    :type region: string
    :param region: AWS region for instance

    :rtype: Dict with the stopped instances state info.
    """
    ec2Client = handle.client('ec2', region_name=region)
    res = ec2Client.stop_instances(InstanceIds=[instance_id])
    # Map each stopping instance's ID to its reported current state.
    return {
        entry['InstanceId']: entry['CurrentState']
        for entry in res['StoppingInstances']
    }
================================================
FILE: AWS/legos/aws_tag_ec2_instances/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_target_group_list_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_target_group_list_instances/aws_target_group_list_instances.json
================================================
{
"action_title": "AWS List Instances in a ELBV2 Target Group",
"action_description": "List AWS Instance in a ELBv2 Target Group",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_target_group_list_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_EC2", "CATEGORY_TYPE_AWS_ELB"]
}
================================================
FILE: AWS/legos/aws_target_group_list_instances/aws_target_group_list_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from unskript.legos.utils import parseARN
class InputSchema(BaseModel):
    """Input parameters for listing instances in an ELBv2 target group."""
    # Full ARN of the target group; the AWS region is parsed out of it.
    arn: str = Field(
        title='Target Group ARN',
        description='ARN of the Target Group.')
def aws_target_group_list_instances_printer(output):
    """Pretty-print the list of target-group instances; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_target_group_list_instances(handle, arn: str) -> List:
    """aws_target_group_list_instances List instances in a target group.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type arn: string
    :param arn: ARN of the Target Group.

    :rtype: List of instances with their IPs.
    """
    # The target group's region is embedded in its ARN.
    region = parseARN(arn)['region']
    elbv2Client = handle.client('elbv2', region_name=region)
    ec2Client = handle.client('ec2', region_name=region)
    try:
        targetHealthResponse = elbv2Client.describe_target_health(
            TargetGroupArn=arn
        )
    except Exception as e:
        print(f'Hit exception getting the instance list: {str(e)}')
        raise e
    instancesInfo = []
    for description in targetHealthResponse["TargetHealthDescriptions"]:
        target_id = description['Target']['Id']
        try:
            private_ip = get_instance_private_ip(ec2Client, target_id)
        except Exception:
            # Skip targets whose private IP cannot be resolved.
            continue
        instancesInfo.append({
            'InstanceID': target_id,
            'PrivateIP': private_ip
        })
    return instancesInfo
def get_instance_private_ip(ec2Client, instanceID: str) -> str:
    """Look up the private IP address of a single EC2 instance by its ID.

    :param ec2Client: boto3 EC2 client.
    :param instanceID: ID of the instance to look up.
    :rtype: The instance's private IP address string.
    """
    instance_filter = [{'Name': 'instance-id', 'Values': [instanceID]}]
    try:
        resp = ec2Client.describe_instances(Filters=instance_filter)
    except Exception as e:
        print(f'Failed to get instance details for {instanceID}, err: {str(e)}')
        raise e
    # describe_instances nests instances under reservations.
    return resp['Reservations'][0]['Instances'][0]['PrivateIpAddress']
================================================
FILE: AWS/legos/aws_target_group_list_unhealthy_instances/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_target_group_list_unhealthy_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_target_group_list_unhealthy_instances/aws_target_group_list_unhealthy_instances.json
================================================
{
"action_title": " AWS List Unhealthy Instances in a ELBV2 Target Group",
"action_description": " List AWS Unhealthy Instance in a ELBv2 Target Group",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_target_group_list_unhealthy_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ELB"]
}
================================================
FILE: AWS/legos/aws_target_group_list_unhealthy_instances/aws_target_group_list_unhealthy_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from unskript.legos.utils import parseARN
class InputSchema(BaseModel):
    """Input parameters for listing unhealthy instances in an ELBv2 target group."""
    # Full ARN of the target group; the AWS region is parsed out of it.
    arn: str = Field(
        title='Target Group ARN',
        description='ARN of the Target Group.')
def aws_target_group_list_unhealthy_instances_printer(output):
    """Pretty-print the unhealthy-target descriptions; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_target_group_list_unhealthy_instances(handle, arn: str) -> List:
    """aws_target_group_list_unhealthy_instances returns array of unhealthy instances

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type arn: string
    :param arn: ARN of the Target Group.

    :rtype: Returns array of unhealthy instances
    """
    # The target group's region is embedded in its ARN.
    region = parseARN(arn)['region']
    elbv2Client = handle.client('elbv2', region_name=region)
    try:
        targetHealthResponse = elbv2Client.describe_target_health(
            TargetGroupArn=arn
        )
    except Exception as e:
        print(f'Hit exception getting the instance list: {str(e)}')
        raise e
    # Keep only the targets whose health state is "unhealthy".
    return [
        description
        for description in targetHealthResponse["TargetHealthDescriptions"]
        if description['TargetHealth']['State'] == 'unhealthy'
    ]
================================================
FILE: AWS/legos/aws_target_group_register_unregister_instances/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_target_group_register_unregister_instances/__init__.py
================================================
================================================
FILE: AWS/legos/aws_target_group_register_unregister_instances/aws_target_group_register_unregister_instances.json
================================================
{
"action_title": "AWS Register/Unregister Instances from a Target Group.",
"action_description": "Register/Unregister AWS Instance from a Target Group",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_target_group_register_unregister_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_ELB"]
}
================================================
FILE: AWS/legos/aws_target_group_register_unregister_instances/aws_target_group_register_unregister_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List
from pydantic import BaseModel, Field
from unskript.legos.utils import parseARN
class InputSchema(BaseModel):
    """Input parameters for registering/unregistering instances with a target group."""
    # Full ARN of the target group; the AWS region is parsed out of it.
    arn: str = Field(
        title='Target Group ARN',
        description='ARN of the Target Group.')
    # EC2 instance IDs to register or unregister.
    instance_ids: List[str] = Field(
        title='Instance IDs',
        description='List of instance IDs. For eg. ["i-foo", "i-bar"]')
    # Port the target instances listen on.
    port: int = Field(
        title='Port',
        description='The port on which the instances are listening.'
    )
    # When True, the instances are removed from the target group instead of added.
    unregister: bool = Field(
        False,
        title='Unregister',
        description='Check this if the instances need to be unregistered. By default, it is false.'
    )
#All legos should take inputParamsJson as the input.
#They should assume the handle variable is defined already.
def aws_target_group_register_unregister_instances(handle, arn: str, instance_ids: List, port: int,
                                                   unregister: bool = False) -> None:
    """aws_target_group_register_unregister_instances Allows register/unregister instances to a
       target group.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type arn: string
    :param arn: ARN of the Target Group.

    :type instance_ids: list
    :param instance_ids: List of instance IDs.

    :type port: int
    :param port: The port on which the instances are listening.

    :type unregister: bool
    :param unregister: Check this if the instances need to be unregistered.

    :rtype: None
    """
    # Get the region for the target group from its ARN.
    parsedArn = parseARN(arn)
    elbv2Client = handle.client('elbv2', region_name=parsedArn['region'])
    # Build the target descriptions expected by the ELBv2 API.
    targets = [{'Id': instance_id, 'Port': port} for instance_id in instance_ids]
    try:
        # Bug fix: boto3 client methods take keyword arguments; the previous
        # code passed a single positional dict, which raises a TypeError.
        if unregister is True:
            elbv2Client.deregister_targets(
                TargetGroupArn=arn,
                Targets=targets
            )
        else:
            elbv2Client.register_targets(
                TargetGroupArn=arn,
                Targets=targets
            )
    except Exception as e:
        print(f'Unable to register/unregister: {str(e)}')
        raise e
================================================
FILE: AWS/legos/aws_terminate_ec2_instances/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_update_access_key/__init__.py
================================================
##
## Copyright (c) 2022 unSkript, Inc
## All rights reserved.
##
================================================
FILE: AWS/legos/aws_update_access_key/aws_update_access_key.json
================================================
{
"action_title": "AWS Update Access Key",
"action_description": "Update status of the Access Key",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_update_access_key",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_is_check": false,
"action_is_remediation": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_IAM"]
}
================================================
FILE: AWS/legos/aws_update_access_key/aws_update_access_key.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from unskript.enums.aws_access_key_enums import AccessKeyStatus
class InputSchema(BaseModel):
    """Input parameters for updating the status of an IAM access key."""
    # IAM user that owns the access key.
    aws_username: str = Field(
        title="Username",
        description="Username of the IAM User"
    )
    # Access key ID whose status will be changed.
    aws_access_key_id: str = Field(
        title="Access Key ID",
        description="Old Access Key ID of the User"
    )
    # Target status to set for the key (values defined by AccessKeyStatus).
    status: AccessKeyStatus = Field(
        title="Status",
        description="Status to set for the Access Key"
    )
def aws_update_access_key_printer(output):
    """Print a success banner followed by the API response; no-op when output is None."""
    if output is not None:
        pprint.pprint("Access Key status successfully changed")
        pprint.pprint(output)
def aws_update_access_key(
        handle,
        aws_username: str,
        aws_access_key_id: str,
        status: AccessKeyStatus
    ) -> Dict:
    """aws_update_access_key updates the status of an access key to Inactive/Active

    :type handle: object
    :param handle: Object returned from Task Validate

    :type aws_username: str
    :param aws_username: Username of the IAM user to be looked up

    :type aws_access_key_id: str
    :param aws_access_key_id: Old Access Key ID of the user of which the status
    needs to be updated

    :type status: AccessKeyStatus
    :param status: Status to set for the Access Key

    :rtype: Result Dictionary of result
    """
    iamClient = handle.client('iam')
    result = iamClient.update_access_key(
        UserName=aws_username,
        AccessKeyId=aws_access_key_id,
        Status=status
    )
    # A dict's keys are already unique, so the previous de-duplication loop
    # was dead code; a shallow copy returns the same mapping.
    return dict(result)
================================================
FILE: AWS/legos/aws_update_ttl_for_route53_records/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_update_ttl_for_route53_records/__init__.py
================================================
================================================
FILE: AWS/legos/aws_update_ttl_for_route53_records/aws_update_ttl_for_route53_records.json
================================================
{
"action_title": "AWS Update TTL for Route53 Record",
"action_description": "Update TTL for an existing record in a hosted zone.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_update_ttl_for_route53_records",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_COST_OPT", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_ROUTE53"]
}
================================================
FILE: AWS/legos/aws_update_ttl_for_route53_records/aws_update_ttl_for_route53_records.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from unskript.enums.aws_route53_record_type_enums import Route53RecordType
class InputSchema(BaseModel):
    """Input parameters for updating the TTL of a Route53 record."""
    # ID of the Route53 hosted zone containing the record.
    hosted_zone_id: str = Field(
        ..., description='ID of the hosted zone in Route53', title='Hosted Zone ID'
    )
    # New TTL value in seconds.
    new_ttl: int = Field(
        ..., description='New TTL value for a record. Eg: 300', title='New TTL'
    )
    # Name of the record to update, e.g. example.com.
    record_name: str = Field(
        ...,
        description='Name of record in a hosted zone. Eg: example.com',
        title='Record Name',
    )
    # DNS record type of the record (values defined by Route53RecordType).
    record_type: Route53RecordType = Field(
        ..., description='Record Type of the record.', title='Record Type'
    )
def aws_update_ttl_for_route53_records_printer(output):
    """Pretty-print the change_resource_record_sets response; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_update_ttl_for_route53_records(
        handle,
        hosted_zone_id: str,
        record_name: str,
        record_type:Route53RecordType,
        new_ttl:int
    ) -> Dict:
    """aws_update_ttl_for_route53_records updates the TTL for a Route53 record in a hosted zone.

    :type handle: object
    :param handle: Object returned by the task.validate(...) method.

    :type hosted_zone_id: string
    :param hosted_zone_id: ID of the hosted zone in Route53

    :type record_name: string
    :param record_name: Name of record in a hosted zone. Eg: example.com

    :type record_type: string
    :param record_type: Record Type of the record.

    :type new_ttl: int
    :param new_ttl: New TTL value for a record. Eg: 300

    :rtype: Response of updation on new TTL
    """
    route53Client = handle.client('route53')
    # UPSERT replaces the existing record set with one carrying the new TTL.
    change = {
        'Action': 'UPSERT',
        'ResourceRecordSet': {
            'Name': record_name,
            'Type': record_type,
            'TTL': int(new_ttl)
        }
    }
    return route53Client.change_resource_record_sets(
        HostedZoneId=hosted_zone_id,
        ChangeBatch={'Changes': [change]}
    )
================================================
FILE: AWS/legos/aws_upload_file_to_s3/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_upload_file_to_s3/__init__.py
================================================
================================================
FILE: AWS/legos/aws_upload_file_to_s3/aws_upload_file_to_s3.json
================================================
{
"action_title": "Upload file to S3",
"action_description": "Upload a local file to S3",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_upload_file_to_s3",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS","CATEGORY_TYPE_AWS_S3"]
}
================================================
FILE: AWS/legos/aws_upload_file_to_s3/aws_upload_file_to_s3.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for uploading a local file to an S3 bucket."""
    # Destination S3 bucket name.
    bucketName: str = Field(
        title='Bucket',
        description='Name of the bucket to upload into.')
    # Path of the local file to upload.
    file: str = Field(
        title='File',
        description='Name of the local file to upload into bucket. Eg /tmp/file-to-upload')
    # Optional key prefix prepended to the file's basename to form the object name.
    prefix: str = Field(
        default="",
        title='Prefix',
        description='Prefix to attach to get the final object name to be used in the bucket.')
def aws_upload_file_to_s3_printer(output):
    """Pretty-print the upload status message; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def aws_upload_file_to_s3(handle, bucketName: str, file: str, prefix: str = "") -> str:
    """aws_upload_file_to_s3 uploads a local file to an S3 bucket.

    Fixes: the original annotated ``file`` as ``__file__`` (the module-path
    string, not a type) and carried a docstring copied from an unrelated lego.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type bucketName: string
    :param bucketName: Name of the bucket to upload into.

    :type file: string
    :param file: Path of the local file to upload into the bucket.

    :type prefix: string
    :param prefix: Prefix to attach to get the final object name to be used in the bucket.

    :rtype: Status message naming the file, bucket and object key.
    """
    s3 = handle.client('s3')
    # The object key is the prefix plus the file's basename.
    objName = prefix + file.split("/")[-1]
    with open(file, "rb") as f:
        s3.upload_fileobj(f, bucketName, objName)
    return f"Successfully copied {file} to bucket:{bucketName} object:{objName}"
================================================
FILE: AWS/legos/aws_vpc_service_quota_warning/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: AWS/legos/aws_vpc_service_quota_warning/__init__.py
================================================
================================================
FILE: AWS/legos/aws_vpc_service_quota_warning/aws_vpc_service_quota_warning.json
================================================
{
"action_title": "AWS_VPC_service_quota_warning",
"action_description": "Given an AWS Region and a warning percentage, this Action queries all VPC quota limits, and returns any of Quotas that are over the alert value.",
"action_type": "LEGO_TYPE_AWS",
"action_entry_function": "aws_vpc_service_quota_warning",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_VPC"]
}
================================================
FILE: AWS/legos/aws_vpc_service_quota_warning/aws_vpc_service_quota_warning.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from __future__ import annotations
import pprint
import json
import datetime
from typing import List
from pydantic import BaseModel, Field
from unskript.connectors.aws import aws_get_paginator
from beartype import beartype
class InputSchema(BaseModel):
    """Input parameters for the VPC service-quota warning action."""
    # AWS region whose VPC quotas are inspected.
    region: str = Field(..., description='AWS Region.', title='Region')
    # Usage/limit ratio (in percent) above which a quota is reported.
    # A value of 0 reports every quota.
    warning_percentage: float = Field(
        50,
        description='Percentage threshold for a warning. For a complete list of quotas, use 0.',
        title='warning_percentage',
    )
@beartype
def aws_vpc_service_quota_warning_printer(output):
    """Pretty-print the quota-warning list; no-op when output is None."""
    if output is not None:
        pprint.pprint({"Instances": output})
@beartype
@beartype
def aws_vpc_service_quota_warning(handle, region: str, warning_percentage: float) -> List:
## EC@ and VPCs
ec2Client = handle.client('ec2', region_name=region)
# List all VPCs in the specified region
q_table = [
#per region stats
#per region stats
{
'QuotaName':'VPCs Per Region',
'ServiceCode':'vpc',
'QuotaCode': 'L-F678F1CE',
'ApiName': 'describe_vpcs',
'ApiFilter' : '[]',
'ApiParam': 'Vpcs',
'initialQuery': ''
},
{
'QuotaName':'VPC security groups per Region',
'ServiceCode':'vpc',
'QuotaCode': 'L-E79EC296',
'ApiName': 'describe_security_groups',
'ApiFilter' :'[]',
'ApiParam': 'SecurityGroups',
'initialQuery': ''
},
{
'QuotaName':'Egress-only internet gateways per Region',
'ServiceCode':'vpc',
'QuotaCode': 'L-45FE3B85',
'ApiName': 'describe_egress_only_internet_gateways',
'ApiFilter' : '[]',
'ApiParam': 'EgressOnlyInternetGateways',
'initialQuery': ''
},
{
'QuotaName':'Gateway VPC endpoints per Region',
'ServiceCode':'vpc',
'QuotaCode': 'L-1B52E74A',
'ApiName': 'describe_vpc_endpoints',
'ApiFilter' : '[{"Name": "vpc-endpoint-type","Values": ["Gateway"]}]',
'ApiParam': 'VpcEndpoints',
'initialQuery': ''
},
{
'QuotaName':'Internet gateways per Region',
'ServiceCode':'vpc',
'QuotaCode': 'L-A4707A72',
'ApiName': 'describe_internet_gateways',
'ApiFilter' : '[]',
'ApiParam': 'InternetGateways',
'initialQuery': ''
},
{
'QuotaName':'Network interfaces per Region',
'ServiceCode':'vpc',
'QuotaCode': 'L-DF5E4CA3',
'ApiName': 'describe_network_interfaces',
'ApiFilter' : '[]',
'ApiParam': 'NetworkInterfaces',
'initialQuery': ''
},
#per VPC stats
{
'QuotaName':'Active VPC peering connections per VPC',
'ServiceCode':'vpc',
'QuotaCode': 'L-7E9ECCDB',
'ApiName': 'describe_vpc_peering_connections',
'ApiFilter' : '[{"Name": "status-code","Values": ["active"]}, {"Name": "requester-vpc-info.vpc-id","Values": ["VARIABLE"]}]',
'ApiParam': 'VpcPeeringConnections',
'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
},
{
'QuotaName':'Interface VPC endpoints per VPC',
'ServiceCode':'vpc',
'QuotaCode': 'L-29B6F2EB',
'ApiName': 'describe_vpc_endpoints',
'ApiFilter' : '[{"Name": "vpc-endpoint-type","Values": ["Interface"]}, {"Name": "vpc-id","Values": ["VARIABLE"]}]',
'ApiParam': 'VpcEndpoints',
'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
},
{
'QuotaName':'IPv4 CIDR blocks per VPC',
'ServiceCode':'vpc',
'QuotaCode': 'L-83CA0A9D',
'ApiName': '',
'ApiFilter': '',
'ApiParam': 'CidrBlockAssociationSet',
'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
},
{
'QuotaName':' Network ACLs per VPC',
'ServiceCode':'vpc',
'QuotaCode': 'L-B4A6D682',
'ApiName': 'describe_network_acls',
'ApiFilter': '[{"Name": "vpc-id","Values": ["VARIABLE"]}]',
'ApiParam': 'NetworkAcls',
'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
},
{
'QuotaName':'Participant accounts per VPC',
'ServiceCode':'vpc',
'QuotaCode': 'L-2C462E13',
'ApiName': 'describe_vpc_peering_connections',
'ApiFilter': '[{"Name": "requester-vpc-info.vpc-id","Values": ["VARIABLE"]}]',
'ApiParam': 'VpcPeeringConnections',
'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
},
{
'QuotaName':'Route tables per VPC',
'ServiceCode':'vpc',
'QuotaCode': 'L-589F43AA',
'ApiName': 'describe_route_tables',
'ApiFilter': '[{"Name": "vpc-id","Values": ["VARIABLE"]}]',
'ApiParam': 'RouteTables',
'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
},
{
'QuotaName':'Subnets per VPC',
'ServiceCode':'vpc',
'QuotaCode': 'L-407747CB',
'ApiName': 'describe_subnets',
'ApiFilter': '[{"Name": "vpc-id","Values": ["VARIABLE"]}]',
'ApiParam': 'Subnets',
'initialQuery': '["describe_vpcs","Vpcs", "VpcId"]'
},
{
'QuotaName':'NAT gateways per Availability Zone',
'ServiceCode':'vpc',
'QuotaCode': 'L-FE5A380F',
'ApiName': 'describe_nat_gateways',
'ApiFilter': '[]',
'ApiParam': 'NatGateways',
'initialQuery': ''
},
{
'QuotaName':'Inbound or outbound rules per security group',
'ServiceCode':'vpc',
'QuotaCode': 'L-0EA8095F',
'ApiName': 'describe_security_groups',
'ApiFilter': '[]',
'ApiParam': 'SecurityGroups',
'initialQuery': ''
},
{
'QuotaName':'Outstanding VPC peering connection requests',
'ServiceCode':'vpc',
'QuotaCode': 'L-DC9F7029',
'ApiName': 'describe_vpc_peering_connections',
'ApiFilter': '[{"Name": "status-code", "Values": ["pending-acceptance"]}]',
'ApiParam': 'VpcPeeringConnections',
'initialQuery': ''
},
{
'QuotaName':'Routes per route table',
'ServiceCode':'vpc',
'QuotaCode': 'L-93826ACB',
'ApiName': 'describe_route_tables',
'ApiFilter': '[]',
'ApiParam': 'RouteTables',
'initialQuery': ''
},
{
'QuotaName':'Rules per network ACL',
'ServiceCode':'vpc',
'QuotaCode': 'L-2AEEBF1A',
'ApiName': 'describe_network_acls',
'ApiFilter':'[]',
'ApiParam': 'NetworkAcls',
'initialQuery': ''
},
{
'QuotaName':'Security groups per network interface',
'ServiceCode':'vpc',
'QuotaCode': 'L-2AFB9258',
'ApiName': 'describe_network_interfaces',
'ApiFilter': '[]',
'ApiParam': 'NetworkInterfaces',
'initialQuery': ''
},
{
'QuotaName':'VPC peering connection request expiry hours',
'ServiceCode':'vpc',
'QuotaCode': 'L-8312C5BB',
'ApiName': 'describe_vpc_peering_connections',
'ApiFilter': '[{"Name": "expiration-time"}]',
'ApiParam': 'VpcPeeringConnections',
'initialQuery': ''
}
]
#print(q_table)
result = []
sqClient = handle.client(
'service-quotas',
region_name=region
)
for i in q_table:
#convert the ApiFilter to a list
#'[{"Name": "vpc-endpoint-type","Values": ["Gateway"]}]'
filterList=''
if len(i.get('ApiFilter')) > 0:
filterList = json.loads(i.get('ApiFilter'))
#print("filter", filterList)
#get quota
sq = sqClient.get_service_quota(
ServiceCode=i.get('ServiceCode'),
QuotaCode=i.get('QuotaCode'))
quotaValue =sq['Quota']['Value']
#simple queries (Only one call to get the details)
if i.get('initialQuery') == '':
#find usage
res = aws_get_paginator(
ec2Client,
i.get('ApiName'),
i.get('ApiParam'),
Filters=filterList
)
#most of the time, all we need is the length (else)
if i.get('QuotaName')=="NAT gateways per Availability Zone":
#count the subets per nat gateway
# Create a dictionary to store the count of NAT gateways for each Availability Zone
az_nat_gateway_count = {}
# Loop through each NAT gateway and count the number for each Availability Zone
for nat_gateway in res:
az = nat_gateway['SubnetId']
if az in az_nat_gateway_count:
az_nat_gateway_count[az] += 1
else:
az_nat_gateway_count[az] = 1
for gw, value in az_nat_gateway_count.items():
percentage = value/quotaValue
combinedData = {
'Quota Name': i.get('QuotaName') + ": "+ gw ,
'Limit':quotaValue,
'used': value,
'percentage':percentage
}
result.append( combinedData)
#print(combinedData)
if i.get('QuotaName')=="Inbound or outbound rules per security group":
for security_group in res:
ruleCount = len(security_group['IpPermissions']) +len(security_group['IpPermissionsEgress'])
percentage = ruleCount/quotaValue
if len(i.get('QuotaName'))>0:
combinedData = {
'Quota Name': i.get('QuotaName') +": "+ security_group['GroupName'] ,
'Limit':quotaValue,
'used': ruleCount,
'percentage':percentage
}
result.append(combinedData)
#print(combinedData)
if i.get('QuotaName')=="Routes per route table":
for route_table in res:
route_count = len(route_table['Routes'])
route_table_id = route_table['RouteTableId']
percentage = route_count/quotaValue
combinedData = {
'Quota Name': i.get('QuotaName') +": "+ route_table_id ,
'Limit':quotaValue,
'used': route_count,
'percentage':percentage
}
result.append( combinedData)
#print(combinedData)
if i.get('QuotaName')=="Rules per network ACL":
for network_acl in res:
rule_count = len(network_acl['Entries'])
network_acl_id = network_acl['NetworkAclId']
percentage = rule_count/quotaValue
combinedData = {
'Quota Name': i.get('QuotaName') +": "+ network_acl_id ,
'Limit':quotaValue,
'used': rule_count,
'percentage':percentage
}
result.append( combinedData)
#print(combinedData)
if i.get('QuotaName')=="Security groups per network interface":
for network_interface in res:
security_group_count = len(network_interface['Groups'])
network_interface_id = network_interface['NetworkInterfaceId']
percentage = security_group_count/quotaValue
if len(i.get('QuotaName'))>0:
combinedData = {
'Quota Name': i.get('QuotaName') +": "+ network_interface_id ,
'Limit':quotaValue,
'used': security_group_count,
'percentage':percentage
}
result.append(combinedData)
#print(combinedData)
if i.get('QuotaName')=="VPC peering connection request expiry hours":
if len(res)>0:
for peering_connection in res:
expiration_time = peering_connection['ExpirationTime']
current_time = datetime.now(datetime.timezone.utc)
time_remaining = expiration_time - current_time
peering_connection_id = peering_connection['VpcPeeringConnectionId']
percentage = time_remaining/quotaValue
combinedData = {
'Quota Name': i.get('QuotaName') +": "+ peering_connection_id ,
'Limit':quotaValue,
'used': time_remaining,
'percentage':percentage
}
result.append(combinedData)
#print(combinedData)
else:
#most common default case
count = len(res)
percentage = count/quotaValue
combinedData = {
'Quota Name': i.get('QuotaName'),
'Limit':quotaValue,
'used': count,
'percentage':percentage
}
result.append( combinedData)
#print(combinedData)
#nested queries (get X per VPC or get y per network interface)
else:
#nested query for quota
#for example 'initialQuery': ['describe_vpcs','Vpcs', 'VpcId'] gets
#the list of VPCs,that we can then ask abour each VPC
#turn initalQuery string into a list
#'initialQuery': ['describe_vpcs','Vpcs', 'VpcId']
initialQuery = json.loads(i.get('initialQuery'))
initialQueryName = initialQuery[0]
initialQueryParam = initialQuery[1]
initialQueryFilter = initialQuery[2]
#inital Query
res = aws_get_paginator(ec2Client, initialQueryName, initialQueryParam)
#print(res)
#nested query
for j in res:
#most of the time, there will be a 2nd query, and the table will
# have an 'ApiName' value
if len(i.get('ApiName')) >0:
#rebuild filter
#print("test", j[initialQueryFilter])
variableReplace = j[initialQueryFilter]
filterList = i.get('ApiFilter')
filterList = filterList.replace("VARIABLE", variableReplace)
filterList = json.loads(filterList)
res2 = aws_get_paginator(
ec2Client,
i.get('ApiName'),
i.get('ApiParam'),
Filters=filterList
)
#most of the time we can just count the length of the response (else)
if i.get('QuotaName') =="Participant accounts per VPC":
count =0
#there can be zero peering conncetions....
if len(res2) >0:
for connection in res2:
if len(connection['AccepterVpcInfo']['OwnerId']) >0:
count += 1
else:
count = len(res2)
else:
#the value is in the first query, but we need to loop through it
apiParam = i.get('ApiParam')
#print(apiParam, j[apiParam])
count = len(j[apiParam])
percentage = count/quotaValue
quotaName = f"{i.get('QuotaName')} for {j[initialQueryFilter]}"
combinedData = {
'Quota Name': quotaName,
'Limit':quotaValue,
'used': count,
'percentage':percentage
}
result.append(combinedData)
#print(combinedData)
# all the data is now in a list called result
warning_result =[]
threshold = warning_percentage/100
for quota in result:
if quota['percentage'] >= threshold:
#there are two sums that appear, and throw errors.
if quota['Quota Name'] != 'Inbound or outbound rules per security group':
if quota['Quota Name'] != 'Security groups per network interface':
warning_result.append(quota)
return warning_result
================================================
FILE: Airflow/README.md
================================================
# Airflow Actions
* [Get Status for given DAG](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Airflow/legos/airflow_check_dag_status/README.md): Get Status for given DAG
* [Get Airflow handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Airflow/legos/airflow_get_handle/README.md): Get Airflow handle
* [List DAG runs for given DagID](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Airflow/legos/airflow_list_DAG_runs/README.md): List DAG runs for given DagID
* [Airflow trigger DAG run](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Airflow/legos/airflow_trigger_dag_run/README.md): Airflow trigger DAG run
================================================
FILE: Airflow/__init__.py
================================================
================================================
FILE: Airflow/legos/__init__.py
================================================
================================================
FILE: Airflow/legos/airflow_check_dag_status/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Azure/legos/azure_get_handle/__init__.py
================================================
================================================
FILE: Azure/legos/azure_get_handle/azure_get_handle.json
================================================
{
"action_title": "Get Azure Handle",
"action_description": "Get Azure Handle",
"action_type": "LEGO_TYPE_AZURE",
"action_entry_function": "azure_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_AZURE" ]
}
================================================
FILE: Azure/legos/azure_get_handle/azure_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    # This action takes no user-supplied inputs, so the schema is intentionally empty.
    pass
def azure_get_handle(handle):
    """azure_get_handle returns the azure handle.

    :type handle: object
    :param handle: Pre-authenticated Azure client supplied by the platform.

    :rtype: Azure Handle.
    """
    # Expose the already-initialized client to downstream workflow steps.
    azure_client = handle
    return azure_client
================================================
FILE: Datadog/README.md
================================================
# Datadog Actions
* [Datadog delete incident](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_delete_incident/README.md): Delete an incident given its id
* [Datadog get event](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_get_event/README.md): Get an event given its id
* [Get Datadog Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_get_handle/README.md): Get Datadog Handle
* [Datadog get incident](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_get_incident/README.md): Get an incident given its id
* [Datadog get metric metadata](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_get_metric_metadata/README.md): Get the metadata of a metric.
* [Datadog get monitor](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_get_monitor/README.md): Get details about a monitor
* [Datadog get monitorID given the name](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_get_monitorid/README.md): Get monitorID given the name
* [Datadog list active metrics](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_list_active_metrics/README.md): Get the list of actively reporting metrics from a given time until now.
* [Datadog list all monitors](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_list_all_monitors/README.md): List all monitors
* [Datadog list metrics](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_list_metrics/README.md): Lists metrics from the last 24 hours in Datadog.
* [Datadog mute/unmute monitors](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_mute_or_unmute_alerts/README.md): Mute/unmute monitors
* [Datadog query metrics](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_query_metrics/README.md): Query timeseries points for a metric.
* [Schedule downtime](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_schedule_downtime/README.md): Schedule downtime
* [Datadog search monitors](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Datadog/legos/datadog_search_monitors/README.md): Search monitors in datadog based on filters
================================================
FILE: Datadog/__init__.py
================================================
================================================
FILE: Datadog/legos/__init__.py
================================================
================================================
FILE: Datadog/legos/datadog_delete_incident/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Datadog/legos/datadog_get_handle/__init__.py
================================================
================================================
FILE: Datadog/legos/datadog_get_handle/datadog_get_handle.json
================================================
{
"action_title": "Get Datadog Handle",
"action_description": "Get Datadog Handle",
"action_type": "LEGO_TYPE_DATADOG",
"action_entry_function": "datadog_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false
}
================================================
FILE: Datadog/legos/datadog_get_handle/datadog_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    # This action takes no user-supplied inputs, so the schema is intentionally empty.
    pass
def datadog_get_handle(handle):
    """datadog_get_handle returns the Datadog handle.

    :type handle: object
    :param handle: Pre-authenticated Datadog client supplied by the platform.

    :rtype: Datadog Handle.
    """
    # Expose the already-initialized client to downstream workflow steps.
    datadog_client = handle
    return datadog_client
================================================
FILE: Datadog/legos/datadog_get_incident/README.md
================================================
[\n", "
1) Cluster Health Check
2) Disable shard allocation
3) Shut down node
4) Perform changes/ maintenance
5) Start the node
6) Reenable shard allocation
7) Cluster Health Check
This action checks the status of an Elasticsearch cluster to trigger a rolling restart for the cluster. Ideally, the cluster should show Green/ None in which case Step 2 will not be triggered. These are the cluster statuses that you may encounter-
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 10, "id": "ee0a70e8-74d3-43f9-9a0a-a7e3b1989565", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "85e40f4fbed1df45b80cdf78eef44ac8a77605316ee1df76820dbd7e518c629b", "checkEnabled": false, "collapsed": true, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Elasticsearch Check Health Status", "execution_data": { "last_date_success_run_cell": "2023-02-17T08:26:50.997Z" }, "id": 80, "index": 80, "inputschema": [ { "properties": {}, "title": "elasticsearch_check_health_status", "type": "object" } ], "isUnskript": false, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_ELASTICSEARCH", "name": "Elasticsearch Cluster Health", "nouns": [], "orderProperties": [], "output": { "type": "" }, "outputParams": { "output_name": "cluster_health", "output_name_enabled": true }, "printOutput": true, "tags": [ "elasticsearch_check_health_status" ], "trusted": true, "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import subprocess\n", "import pprint\n", "from pydantic import BaseModel, Field\n", "from typing import Dict, Tuple\n", "from subprocess import PIPE\n", "import json\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def elasticsearch_check_health_status_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def elasticsearch_check_health_status(handle) -> Tuple:\n", " result = []\n", " cluster_health ={}\n", " \"\"\"elasticsearch_check_health_status checks 
the status of an Elasticsearch cluster .\n", "\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :rtype: Result Dict of result\n", " \"\"\"\n", "\n", " output = handle.web_request(\"/_cluster/health?pretty\", # Path\n", " \"GET\", # Method\n", " None) # Data\n", " if output['status'] != 'green':\n", " cluster_health[output['cluster_name']] = output['status'] \n", " result.append(cluster_health)\n", " if len(result) != 0:\n", " return(False, result)\n", " else:\n", " return(True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(outputName=\"cluster_health\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(elasticsearch_check_health_status, lego_printer=elasticsearch_check_health_status_printer, hdl=hdl, args=args)" ] }, { "cell_type": "code", "execution_count": 12, "id": "1cb9e668-e088-44f3-9012-d7b9f6589e7f", "metadata": { "collapsed": true, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-17T08:28:12.189Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Get status value", "orderProperties": [], "tags": [], "title": "Get status value", "trusted": true, "credentialsJson": {} }, "outputs": [], "source": [ "cluster_health_status = ''\n", "for cluster in cluster_health:\n", " if type(cluster)==list:\n", " if len(cluster)!=0:\n", " for x in cluster:\n", " for status in x.values():\n", " cluster_health_status= status\n", " else:\n", " cluster_health_status = 'None'\n", "print(cluster_health_status)" ] }, { "cell_type": "markdown", "id": "696df79e-d970-4f28-ad3e-7502b40c77a6", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action takes the following parameters:
\n", "None
Using unSkript's Elasticsearch Disable Shard Allocation action we can disable shard allocation to avoid rebalancing of missing shards while the node shutdown process is in progress. This step ensures that no new shards are assigned till the node restarts.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 12, "id": "e746026e-cb23-4fc7-b551-a2a70edcb81a", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "23abf6572cb81c61e965514d011c6636363d10be6ed1ac6b178127fd090ed462", "checkEnabled": false, "condition_enabled": true, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Elasticsearch Disable Shard Allocation for any indices", "execution_data": { "last_date_success_run_cell": "2023-02-16T06:05:40.223Z" }, "id": 74, "index": 74, "inputschema": [ { "properties": {}, "title": "elasticsearch_disable_shard_allocation", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_ELASTICSEARCH", "name": "Elasticsearch Disable Shard Allocation", "nouns": [], "orderProperties": [], "output": { "type": "" }, "printOutput": true, "startcondition": "cluster_health_status!='None'", "tags": [ "elasticsearch_disable_shard_allocation" ], "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import subprocess\n", "import pprint\n", "from pydantic import BaseModel, Field\n", "from typing import List, Dict\n", "from subprocess import PIPE, run\n", "import json\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def elasticsearch_disable_shard_allocation_printer(output):\n", " if output is None:\n", " return\n", " print(\"Shard allocations disabled for any kind shards\")\n", " print(output)\n", "\n", "\n", "@beartype\n", "def elasticsearch_disable_shard_allocation(handle) -> Dict:\n", " 
\"\"\"elasticsearch_disable_shard_allocation disallows shard allocations for any indices.\n", "\n", " :type handle: object\n", " :param handle: Object returned from Task Validate\n", "\n", " :rtype: Result Dict of result\n", " \"\"\"\n", "\n", " es_dict = {\"transient\": {\"cluster.routing.allocation.enable\": \"none\"}}\n", " output = handle.web_request(\"/_cluster/settings?pretty\", # Path\n", " \"PUT\", # Method\n", " es_dict) # Data\n", "\n", " return output\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"cluster_health_status!='None'\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(elasticsearch_disable_shard_allocation, lego_printer=elasticsearch_disable_shard_allocation_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "8a0d9d6e-2aef-4f23-81a2-feb6be5c4c0a", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 3", "orderProperties": [], "tags": [], "title": "Step 3" }, "source": [ "This action takes the following parameters:
\n", "None
In this step we can perform maintenance jobs, install updates or even modify the elasticsearch.yml. We can create a custom action (Click on Add button on the top) as per the requirement and add it in this step.
\n", "This article explains some of the common issues incurred by Elasticsearch clusters- link to blog
\n", "Note- Please make sure that the configuration changes don't cause the failure of a node restart in the next step\n",
""
]
},
{
"cell_type": "markdown",
"id": "80b61c80-85bb-48e9-9501-92151a9156c8",
"metadata": {
"jupyter": {
"source_hidden": false
},
"name": "Step 5",
"orderProperties": [],
"tags": [],
"title": "Step 5"
},
"source": [
        "This action checks the status of an Elasticsearch cluster after restart. Ideally, the cluster should show Green status after a successful restart. These are the cluster statuses that you may encounter-
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 8, "id": "3a19e8d6-8163-4504-9395-f27bb28729c5", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": false, "actionSupportsPoll": false, "action_modified": false, "action_uuid": "4590490856e040f305f080b411c392a054142f152696902a4724250aaa057b02", "checkEnabled": false, "collapsed": true, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Get Elasticsearch Handle", "execution_data": { "last_date_success_run_cell": "2023-02-17T08:25:35.019Z" }, "id": 81, "index": 81, "inputschema": [ { "properties": {}, "title": "elasticsearch_get_handle", "type": "object" } ], "isUnskript": false, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_ELASTICSEARCH", "name": "Get Elasticsearch Handle", "nouns": [], "orderProperties": [], "output": { "type": "" }, "outputParams": { "output_name": "handle", "output_name_enabled": true }, "printOutput": true, "tags": [ "elasticsearch_get_handle" ], "trusted": true, "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "##\n", "## Copyright (c) 2021 unSkript, Inc\n", "## All rights reserved.\n", "##\n", "from pydantic import BaseModel\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def elasticsearch_get_handle(handle):\n", " \"\"\"elasticsearch_get_handle returns the elasticsearch client handle.\n", "\n", " :rtype: elasticsearch client handle.\n", " \"\"\"\n", " return handle\n", "\n", "\n", "def unskript_default_printer(output):\n", " if isinstance(output, (list, tuple)):\n", " for item in output:\n", " print(f'item: {item}')\n", " elif isinstance(output, dict):\n", " for item in output.items():\n", " print(f'item: {item}')\n", " else:\n", " 
print(f'Output for {task.name}')\n", " print(output)\n", "\n", "task = Task(Workflow())\n", "task.configure(outputName=\"handle\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(elasticsearch_get_handle, lego_printer=unskript_default_printer, hdl=hdl, args=args)" ] }, { "cell_type": "code", "execution_count": 15, "id": "a89d952a-40ea-43d4-ad1d-9456c339fee9", "metadata": { "collapsed": true, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-17T08:29:19.684Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Check Cluster Health", "orderProperties": [], "tags": [], "title": "Check Cluster Health", "trusted": true, "credentialsJson": {} }, "outputs": [], "source": [ "from unskript.legos.elasticsearch.elasticsearch_check_health_status.elasticsearch_check_health_status import elasticsearch_check_health_status\n", "\n", "output = elasticsearch_check_health_status(handle=handle)\n", "cluster_health_status = ''\n", "for cluster in output:\n", " if type(cluster)==list:\n", " if len(cluster)!=0:\n", " for x in cluster:\n", " for status in x.values():\n", " cluster_health_status= status\n", " else:\n", " cluster_health_status = 'green'\n", "print(\"Cluster Status: \",cluster_health_status)" ] }, { "cell_type": "markdown", "id": "b20dd1a3-fb5f-4bb1-b5cd-2494d752b930", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action takes the following parameters:
\n", "None
In this Runbook, we were able to perform rolling restart on a node in an Elasticsearch cluster using unSkript's Elasticsearch and SSH legos. This runbook can be re-triggered for multiple clusters in a sequence. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Elasticsearch Rolling restart", "parameters": [ "cmd_start_elasticsearch", "cmd_stop_elasticsearch", "host_for_ssh", "run_with_sudo" ] }, "kernelspec": { "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "cmd_start_elasticsearch": { "default": "sudo systemctl start elasticsearch.service", "description": "Command to start Elasticsearch service", "title": "cmd_start_elasticsearch", "type": "string" }, "cmd_stop_elasticsearch": { "default": "sudo systemctl stop elasticsearch.service", "description": "Command to stop the Elasticsearch service", "title": "cmd_stop_elasticsearch", "type": "string" }, "host_for_ssh": { "default": "[]", "description": "Host IP of elasticsearch server to SSH in List format. Eg: [123.45.67.89]", "title": "host_for_ssh", "type": "array" }, "run_with_sudo": { "default": false, "description": "Run commands to start/stop elasticsearch with sudo", "title": "run_with_sudo", "type": "boolean" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": { "host_for_ssh": "[]" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: ElasticSearch/Elasticsearch_Rolling_Restart.json ================================================ { "name": "Elasticsearch Rolling restart", "description": "This runbook can be used to perform rolling restart on ES", "uuid": "7b308783a38a72461839e7bd1d13fbb4e8559d4b291a1454be39c40a2f026ce2", "icon": "CONNECTOR_TYPE_ELASTICSEARCH", "categories": [ "CATEGORY_TYPE_ES" ], "connector_types": [ "CONNECTOR_TYPE_ELASTICSEARCH" ], "version": "1.0.0" } 
================================================ FILE: ElasticSearch/README.md ================================================ # ElasticSearch RunBooks * [Elasticsearch Rolling restart](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/Elasticsearch_Rolling_Restart.ipynb): This runbook can be used to perform rolling restart on ES # ElasticSearch Actions * [Elasticsearch Cluster Health](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/legos/elasticsearch_check_health_status/README.md): Elasticsearch Check Health Status * [Get large Elasticsearch Index size](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/legos/elasticsearch_check_large_index_size/README.md): This action checks the sizes of all indices in the Elasticsearch cluster and compares them to a given threshold. * [Check Elasticsearch cluster disk size](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/legos/elasticsearch_compare_cluster_disk_size_to_threshold/README.md): This action compares the disk usage percentage of the Elasticsearch cluster to a given threshold. 
* [Elasticsearch Delete Unassigned Shards](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/legos/elasticsearch_delete_unassigned_shards/README.md): Elasticsearch Delete Corrupted/Lost Shards * [Elasticsearch Disable Shard Allocation](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/legos/elasticsearch_disable_shard_allocation/README.md): Elasticsearch Disable Shard Allocation for any indices * [Elasticsearch Enable Shard Allocation](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/legos/elasticsearch_enable_shard_allocation/README.md): Elasticsearch Enable Shard Allocation for any shards for any indices * [Elasticsearch Cluster Statistics](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/legos/elasticsearch_get_cluster_statistics/README.md): Elasticsearch Cluster Statistics fetches total index size, disk size, and memory utilization and information about the current nodes and shards that form the cluster * [Get Elasticsearch Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/legos/elasticsearch_get_handle/README.md): Get Elasticsearch Handle * [Get Elasticsearch index level health](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/legos/elasticsearch_get_index_health/README.md): This action checks the health of a given Elasticsearch index or all indices if no specific index is provided. 
* [Elasticsearch List Allocations](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/legos/elasticsearch_list_allocations/README.md): Elasticsearch List Allocations in a Cluster * [Elasticsearch List Nodes](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/legos/elasticsearch_list_nodes/README.md): Elasticsearch List Nodes in a Cluster * [Elasticsearch search](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/ElasticSearch/legos/elasticsearch_search_query/README.md): Elasticsearch Search ================================================ FILE: ElasticSearch/__init__.py ================================================ ================================================ FILE: ElasticSearch/legos/__init__.py ================================================ # # unSkript (c) 2022 # ================================================ FILE: ElasticSearch/legos/elasticsearch_check_health_status/README.md ================================================ [
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: ElasticSearch/legos/elasticsearch_check_health_status/__init__.py
================================================
================================================
FILE: ElasticSearch/legos/elasticsearch_check_health_status/elasticsearch_check_health_status.json
================================================
{
"action_title": "Elasticsearch Cluster Health",
"action_description": "Elasticsearch Check Health Status",
"action_type": "LEGO_TYPE_ELASTICSEARCH",
"action_entry_function": "elasticsearch_check_health_status",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_is_check":true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_ES"],
"action_next_hop":["7b308783a38a72461839e7bd1d13fbb4e8559d4b291a1454be39c40a2f026ce2"]
}
================================================
FILE: ElasticSearch/legos/elasticsearch_check_health_status/elasticsearch_check_health_status.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Tuple, Optional
class InputSchema(BaseModel):
    # Pydantic schema describing the action's user-configurable input.
    # Alert threshold: the health check reports unhealthy as soon as the
    # cluster's unassigned-shard count exceeds this value.
    unassigned_shards: Optional[int] = Field(
        20,
        description='Threshold number of unassigned shards. Default - 20',
        title='Number of unassigned shards'
    )
def elasticsearch_check_health_status_printer(output):
    """Print the health-check result; a None output produces no output at all."""
    if output is not None:
        print(output)
def elasticsearch_check_health_status(handle, unassigned_shards: int = 20) -> Tuple:
    """elasticsearch_check_health_status checks the status of an Elasticsearch cluster.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type unassigned_shards: int
    :param unassigned_shards: Threshold number of unassigned shards. Default - 20

    :rtype: Result Tuple of result
    """
    health = handle.web_request("/_cluster/health?pretty", "GET", None)

    # A green cluster is healthy by definition; nothing further to inspect.
    if health['status'] == 'green':
        return (True, None)

    report = {
        "cluster_name": health['cluster_name'],
        "status": health['status'],
        "unassigned_shards": health['unassigned_shards']
    }

    # Report immediately (without extra detail) when the unassigned-shard
    # count already exceeds the caller's threshold.
    if health['unassigned_shards'] > unassigned_shards:
        return (False, [report])

    # Look for other severe conditions: red status, shards in transition,
    # or a node-count mismatch between total and data nodes.
    severe = (
        health['status'] == 'red'
        or health['delayed_unassigned_shards'] > 0
        or health['initializing_shards'] > 0
        or health['relocating_shards'] > 0
        or health['number_of_nodes'] != health['number_of_data_nodes']
    )
    if severe:
        report["delayed_unassigned_shards"] = health['delayed_unassigned_shards']
        report["initializing_shards"] = health['initializing_shards']
        report["relocating_shards"] = health['relocating_shards']
        report["number_of_nodes"] = health['number_of_nodes']
        report["number_of_data_nodes"] = health['number_of_data_nodes']
        return (False, [report])

    # Yellow status with no additional critical issues is considered healthy.
    return (True, None)
================================================
FILE: ElasticSearch/legos/elasticsearch_check_large_index_size/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: ElasticSearch/legos/elasticsearch_check_large_index_size/__init__.py
================================================
================================================
FILE: ElasticSearch/legos/elasticsearch_check_large_index_size/elasticsearch_check_large_index_size.json
================================================
{
"action_title": "Get large Elasticsearch Index size",
"action_description": "This action checks the sizes of all indices in the Elasticsearch cluster and compares them to a given threshold.",
"action_type": "LEGO_TYPE_ELASTICSEARCH",
"action_entry_function": "elasticsearch_check_large_index_size",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: ElasticSearch/legos/elasticsearch_check_large_index_size/elasticsearch_check_large_index_size.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Tuple, Optional
from pydantic import BaseModel,Field
class InputSchema(BaseModel):
    # Action input: size limit (in KB) each index is compared against.
    threshold: Optional[float] = Field(
        10485760, # default: 10 GB expressed in KB (10 * 1024 * 1024)
        description='Threshold for index size in KB.',
        title='Threshold (in KB)'
    )
def elasticsearch_check_large_index_size_printer(result):
    """Render the (status, alerts) tuple returned by elasticsearch_check_large_index_size."""
    status, alerts = result
    if status:
        print("Index sizes are within the threshold.")
        return
    for entry in alerts:
        print(f"Alert! Index size of {entry['indexSizeKB']} KB for index {entry['index']} exceeds threshold of {entry['threshold']} KB.")
def elasticsearch_check_large_index_size(handle, threshold: float = 10485760) -> Tuple:
    """
    elasticsearch_check_large_index_size checks the sizes of all indices in the
    Elasticsearch cluster and compares them to a given threshold.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type threshold: float
    :param threshold: The threshold for index size in KB.

    :return: Status, alerts (if any index size exceeds the threshold).
    """
    alerts = []
    # List all index names, one per line. Skip blank lines and system/hidden
    # indices (names starting with '.').
    indices_output = handle.web_request("/_cat/indices?h=index", "GET", None)
    indices_output = ''.join(indices_output).split('\n')
    indices_output = [index for index in indices_output if index and not index.startswith('.')]
    for current_index in indices_output:
        # Per-index stats; '_all.total' covers primary and replica shards.
        stats_output = handle.web_request(f"/{current_index}/_stats", "GET", None)
        index_size_kb = stats_output['_all']['total']['store']['size_in_bytes'] / 1024
        if index_size_kb > threshold:
            alerts.append({
                'index': current_index,
                'indexSizeKB': index_size_kb,
                'threshold': threshold
            })
    # Removed a try/except that only re-raised the caught exception unchanged.
    if alerts:
        return (False, alerts)
    return (True, None)
================================================
FILE: ElasticSearch/legos/elasticsearch_compare_cluster_disk_size_to_threshold/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: ElasticSearch/legos/elasticsearch_compare_cluster_disk_size_to_threshold/__init__.py
================================================
================================================
FILE: ElasticSearch/legos/elasticsearch_compare_cluster_disk_size_to_threshold/elasticsearch_compare_cluster_disk_size_to_threshold.json
================================================
{
"action_title": "Check Elasticsearch cluster disk size",
"action_description": "This action compares the disk usage percentage of the Elasticsearch cluster to a given threshold.",
"action_type": "LEGO_TYPE_ELASTICSEARCH",
"action_entry_function": "elasticsearch_compare_cluster_disk_size_to_threshold",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: ElasticSearch/legos/elasticsearch_compare_cluster_disk_size_to_threshold/elasticsearch_compare_cluster_disk_size_to_threshold.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Optional, Tuple
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Action input: disk-usage percentage above which the check fails.
    threshold: Optional[float] = Field(
        80, description='Threshold for disk usage percentage.', title='Threshold (in %)'
    )
def elasticsearch_compare_cluster_disk_size_to_threshold_printer(output):
    """Render the (status, result) tuple returned by the disk-size check."""
    status, entries = output
    if not status:
        for entry in entries:
            print(f"Alert! Cluster disk usage of {entry['usage_disk_percentage']}% exceeds the threshold of {entry['threshold']}%.")
    else:
        print("Cluster disk usage is within the threshold.")
def elasticsearch_compare_cluster_disk_size_to_threshold(handle, threshold: float=80.0) -> Tuple:
    """
    elasticsearch_compare_cluster_disk_size_to_threshold compares the disk usage percentage of the Elasticsearch cluster to a given threshold.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type threshold: float
    :param threshold: The threshold for disk usage percentage.

    :return: Status, result (if any exceeding the threshold).
    """
    # Fetch per-node allocation as a plain table.
    allocation_output = handle.web_request("/_cat/allocation?v", "GET", None)
    # Skip the header row, then track the highest disk-usage percentage
    # (column index 5) over nodes that hold shards. Rows containing
    # "UNASSIGNED" have no disk columns and are ignored.
    max_disk_percent = 0
    for node_line in allocation_output.splitlines()[1:]:
        if "UNASSIGNED" in node_line:
            continue
        max_disk_percent = max(max_disk_percent, float(node_line.split()[5]))
    if max_disk_percent <= threshold:
        return (True, None)
    return (False, [{"usage_disk_percentage": max_disk_percent, "threshold": threshold}])
================================================
FILE: ElasticSearch/legos/elasticsearch_delete_unassigned_shards/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: ElasticSearch/legos/elasticsearch_delete_unassigned_shards/__init__.py
================================================
================================================
FILE: ElasticSearch/legos/elasticsearch_delete_unassigned_shards/elasticsearch_delete_unassigned_shards.json
================================================
{
"action_title": "Elasticsearch Delete Unassigned Shards",
"action_description": "Elasticsearch Delete Corrupted/Lost Shards",
"action_type": "LEGO_TYPE_ELASTICSEARCH",
"action_entry_function": "elasticsearch_delete_unassigned_shards",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_ES"]
}
================================================
FILE: ElasticSearch/legos/elasticsearch_delete_unassigned_shards/elasticsearch_delete_unassigned_shards.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import subprocess
from pydantic import BaseModel, Field
from subprocess import PIPE, Popen
class InputSchema(BaseModel):
    # This action takes no inputs; the credential supplies the handle.
    pass
def elasticsearch_delete_unassigned_shards_printer(output):
    """Print the result string produced by elasticsearch_delete_unassigned_shards."""
    if output is not None:
        print(output)
def elasticsearch_delete_unassigned_shards(handle) -> str:
    """elasticsearch_delete_unassigned_shards deletes indices that have corrupted/lost (UNASSIGNED) shards.

       :type handle: object
       :param handle: Object returned from Task Validate

       :rtype: Result String of result
    """
    output = handle.web_request("/_cat/shards?v=true&h=index,shard,prirep,state,node,unassigned.reason&s=state&pretty", # Path
                                "GET", # Method
                                None) # Data
    # Collect the index name (first column) of every UNASSIGNED shard row,
    # de-duplicated so an index with several unassigned shards is deleted once.
    list_of_shards = []
    for line in str(output).split('\n'):
        if "UNASSIGNED" in line:
            index_name = line.split()[0]
            if index_name not in list_of_shards:
                list_of_shards.append(index_name)
    if not list_of_shards:
        return "No Unassigned shards found"
    # Bug fix: the original code did '"/" + list_of_shards', concatenating a
    # str with a list and raising TypeError. The DELETE index API takes a
    # comma-separated list of index names.
    handle.web_request("/" + ",".join(list_of_shards), # Path
                       "DELETE", # Method
                       None) # Data
    return "Successfully deleted unassigned shards"
================================================
FILE: ElasticSearch/legos/elasticsearch_disable_shard_allocation/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: ElasticSearch/legos/elasticsearch_disable_shard_allocation/__init__.py
================================================
================================================
FILE: ElasticSearch/legos/elasticsearch_disable_shard_allocation/elasticsearch_disable_shard_allocation.json
================================================
{
"action_title": "Elasticsearch Disable Shard Allocation",
"action_description": "Elasticsearch Disable Shard Allocation for any indices",
"action_type": "LEGO_TYPE_ELASTICSEARCH",
"action_entry_function": "elasticsearch_disable_shard_allocation",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_ES"]
}
================================================
FILE: ElasticSearch/legos/elasticsearch_disable_shard_allocation/elasticsearch_disable_shard_allocation.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import subprocess
import pprint
from pydantic import BaseModel, Field
from typing import List, Dict
from subprocess import PIPE, run
import json
class InputSchema(BaseModel):
    # This action takes no inputs; the credential supplies the handle.
    pass
def elasticsearch_disable_shard_allocation_printer(output):
    """Print the cluster-settings response after shard allocation is disabled."""
    if output is not None:
        print("Shard allocations disabled for any kind shards")
        print(output)
def elasticsearch_disable_shard_allocation(handle) -> Dict:
    """elasticsearch_disable_shard_allocation disallows shard allocations for any indices.

       :type handle: object
       :param handle: Object returned from Task Validate

       :rtype: Result Dict of result
    """
    # Transient cluster setting that blocks allocation for every shard type.
    settings_payload = {"transient": {"cluster.routing.allocation.enable": "none"}}
    return handle.web_request("/_cluster/settings?pretty", # Path
                              "PUT",                       # Method
                              settings_payload)            # Data
================================================
FILE: ElasticSearch/legos/elasticsearch_enable_shard_allocation/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: ElasticSearch/legos/elasticsearch_enable_shard_allocation/__init__.py
================================================
================================================
FILE: ElasticSearch/legos/elasticsearch_enable_shard_allocation/elasticsearch_enable_shard_allocation.json
================================================
{
"action_title": "Elasticsearch Enable Shard Allocation",
"action_description": "Elasticsearch Enable Shard Allocation for any shards for any indices",
"action_type": "LEGO_TYPE_ELASTICSEARCH",
"action_entry_function": "elasticsearch_enable_shard_allocation",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_ES"]
}
================================================
FILE: ElasticSearch/legos/elasticsearch_enable_shard_allocation/elasticsearch_enable_shard_allocation.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import subprocess
import pprint
from pydantic import BaseModel, Field
from typing import List, Dict
from subprocess import PIPE, run
import json
class InputSchema(BaseModel):
    # This action takes no inputs; the credential supplies the handle.
    pass
def elasticsearch_enable_shard_allocation_printer(output):
    """Print the cluster-settings response after shard allocation is re-enabled."""
    if output is not None:
        print("Shard allocations enabled for all kinds of shards")
        print(output)
def elasticsearch_enable_shard_allocation(handle) -> Dict:
    """elasticsearch_enable_shard_allocation enables shard allocations for any shards for any indices.

       :type handle: object
       :param handle: Object returned from Task Validate

       :rtype: Result Dict of result
    """
    # Transient cluster setting that allows allocation for every shard type.
    settings_payload = {"transient": {"cluster.routing.allocation.enable": "all"}}
    return handle.web_request("/_cluster/settings?pretty", # Path
                              "PUT",                       # Method
                              settings_payload)            # Data
================================================
FILE: ElasticSearch/legos/elasticsearch_get_cluster_statistics/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: ElasticSearch/legos/elasticsearch_get_cluster_statistics/__init__.py
================================================
================================================
FILE: ElasticSearch/legos/elasticsearch_get_cluster_statistics/elasticsearch_get_cluster_statistics.json
================================================
{
"action_title": "Elasticsearch Cluster Statistics",
"action_description": "Elasticsearch Cluster Statistics fetches total index size, disk size, and memory utilization and information about the current nodes and shards that form the cluster",
"action_type": "LEGO_TYPE_ELASTICSEARCH",
"action_entry_function": "elasticsearch_get_cluster_statistics",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories":["CATEGORY_TYPE_INFORMATION" , "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_ES"]
}
================================================
FILE: ElasticSearch/legos/elasticsearch_get_cluster_statistics/elasticsearch_get_cluster_statistics.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import List, Dict
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime
from tabulate import tabulate
class InputSchema(BaseModel):
    # This action takes no inputs; the credential supplies the handle.
    pass
def elasticsearch_get_cluster_statistics_printer(output):
    """Pretty-print the statistics gathered by elasticsearch_get_cluster_statistics."""
    if output is None:
        return

    def _show_table(records, suffix=None):
        # Render a one-row table; optionally append a unit tag to each header.
        frame = pd.DataFrame.from_records(records)
        if suffix is not None:
            frame.columns = [f'{col} {suffix}' for col in frame.columns]
        print(tabulate(frame, headers='keys', tablefmt='psql', showindex=False))

    # The cluster reports its timestamp in milliseconds since the epoch.
    timestamp = datetime.fromtimestamp(output.get('timestamp')/1000)
    print("\nCluster Name: ", output.get('cluster_name'))
    print("Timestamp: ", timestamp)
    print("Status: ", output.get('status'))

    # Node Statistics
    print("\nNode Statistics")
    nodes = output.get('_nodes')
    if nodes is None:
        print("Nodes are None")
    else:
        _show_table([nodes])

    # Document Statistics
    print("\nDocument Statistics")
    _show_table([output.get('indices').get('docs')], suffix='(count)')

    # Shard Statistics
    print("\nShard Statistics")
    _show_table([output.get('indices').get('shards').get('index')], suffix='(shard count)')

    # Additional Metrics computed by the action itself.
    print("\nAdditional Metrics")
    _show_table([{
        'total_index_size (MB)': output.get('total_index_size'),
        'total_disk_size (MB)': output.get('total_disk_size'),
        'total_memory_utilization (%)': output.get('total_memory_utilization'),
    }])
def elasticsearch_get_cluster_statistics(handle) -> Dict:
    """elasticsearch_get_cluster_statistics fetches total index size, disk size, and memory utilization
       and information about the current nodes and shards that form the cluster

       :type handle: object
       :param handle: Object returned from Task Validate

       :rtype: Result Dict of result
    """
    def _size_to_mb(size: str) -> float:
        # Convert a human-readable _cat size ("512b", "1.2kb", "3mb", "4gb",
        # "1tb") to megabytes. Suffixes are matched longest-first so "kb" is
        # never mistaken for a bare "b". The original code silently dropped
        # plain-byte and terabyte sizes; unknown suffixes still contribute 0.
        size = size.strip().lower()
        for suffix, factor in (('tb', 1024 * 1024), ('gb', 1024), ('mb', 1),
                               ('kb', 1 / 1024), ('b', 1 / (1024 * 1024))):
            if size.endswith(suffix):
                return float(size[:-len(suffix)]) * factor
        return 0.0

    # Cluster-wide statistics form the base of the returned dict.
    output = handle.web_request("/_cluster/stats?human&pretty", "GET", None)
    # Per-index listing (JSON rows) used to compute the total index size.
    indices_stats = handle.web_request("/_cat/indices?format=json", "GET", None)
    # Per-node statistics used for the disk and JVM heap figures.
    nodes_stats = handle.web_request("/_nodes/stats?human&pretty", "GET", None)

    total_index_size = 0.0
    for index in indices_stats:
        size = index.get('store.size')
        if size:  # closed indices may report no store size
            total_index_size += _size_to_mb(size)

    total_disk_size = sum(float(node['fs']['total']['total_in_bytes']) for node in nodes_stats['nodes'].values())
    total_disk_size /= (1024 * 1024)  # bytes -> MB
    # NOTE: this is the SUM of per-node heap-used percentages (can exceed 100
    # on multi-node clusters), matching the original metric's behavior.
    total_memory = sum(float(node['jvm']['mem']['heap_used_percent']) for node in nodes_stats['nodes'].values())

    # Attach the derived metrics to the cluster-stats payload.
    output['total_index_size'] = total_index_size
    output['total_disk_size'] = total_disk_size
    output['total_memory_utilization'] = total_memory
    return output
================================================
FILE: ElasticSearch/legos/elasticsearch_get_handle/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: ElasticSearch/legos/elasticsearch_get_handle/__init__.py
================================================
================================================
FILE: ElasticSearch/legos/elasticsearch_get_handle/elasticsearch_get_handle.json
================================================
{
"action_title": "Get Elasticsearch Handle",
"action_description": "Get Elasticsearch Handle",
"action_type": "LEGO_TYPE_ELASTICSEARCH",
"action_entry_function": "elasticsearch_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_ES"]
}
================================================
FILE: ElasticSearch/legos/elasticsearch_get_handle/elasticsearch_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    # This action takes no inputs; the credential supplies the handle.
    pass
def elasticsearch_get_handle(handle):
    """elasticsearch_get_handle returns the elasticsearch client handle.

       :rtype: elasticsearch client handle.
    """
    # The credential/validation layer built the handle; pass it through as-is.
    return handle
================================================
FILE: ElasticSearch/legos/elasticsearch_get_index_health/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: ElasticSearch/legos/elasticsearch_get_index_health/__init__.py
================================================
================================================
FILE: ElasticSearch/legos/elasticsearch_get_index_health/elasticsearch_get_index_health.json
================================================
{
"action_title": "Get Elasticsearch index level health",
"action_description": "This action checks the health of a given Elasticsearch index or all indices if no specific index is provided.",
"action_type": "LEGO_TYPE_ELASTICSEARCH",
"action_entry_function": "elasticsearch_get_index_health",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_ES"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: ElasticSearch/legos/elasticsearch_get_index_health/elasticsearch_get_index_health.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Tuple, Optional
class InputSchema(BaseModel):
    # Action input: index to inspect; an empty string means all indices.
    index_name: Optional[str] = Field(
        '',
        description='Name of the index for which the health is checked. If no index is provided, the health of all indices is checked.',
        title='Index name',
    )
def elasticsearch_get_index_health_printer(result):
    """Render the output of elasticsearch_get_index_health.

    Bug fix: the paired check only returns the 'index' and 'health' keys for
    each problematic index, but this printer did direct lookups of 'status',
    'docs.count', etc., and raised KeyError on the check's real output. All
    optional fields are now read with .get() and a placeholder.
    """
    success, outputs = result
    if success or outputs is None or len(outputs) == 0:
        print("No indices found with 'yellow' or 'red' health.")
        return
    for output in outputs:
        print(f"\nProcessing index: {output['index']}")
        print("--------------------------------------------------")
        print(f"Health: {output['health']}")
        print(f"Status: {output.get('status', 'N/A')}")
        print(f"Documents count: {output.get('docs.count', 'N/A')}")
        print(f"Documents deleted: {output.get('docs.deleted', 'N/A')}")
        print(f"Store size: {output.get('store.size', 'N/A')}")
        print(f"Primary shards: {output.get('pri', 'N/A')}")
        print(f"Replicas: {output.get('rep', 'N/A')}")
        settings = output.get('settings', {})
        print("\nKey Settings:")
        print(f"  number_of_shards: {settings.get('number_of_shards')}")
        print(f"  number_of_replicas: {settings.get('number_of_replicas')}")
        print("--------------------------------------------------")
def elasticsearch_get_index_health(handle, index_name="") -> Tuple:
    """
    elasticsearch_get_index_health checks the health of a given Elasticsearch index or all indices if no specific index is provided.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type index_name: str
    :param index_name: Name of the index for which the health is checked. If no index is provided, the health of all indices is checked.

    :rtype: list
    :return: A list of dictionaries where each dictionary contains stats about each index
    """
    try:
        if index_name:
            health_url = f"/_cat/indices/{index_name}?v&h=index,health&format=json"
        else:
            health_url = "/_cat/indices?v&h=index,health&format=json"
        health_response = handle.web_request(health_url, "GET", None)
        if not health_response:
            print(f"No indices found or error retrieving indices: {health_response.get('error', 'No response') if health_response else 'No data'}")
            return (True, None)
        # Any index whose health is not 'green' (i.e. 'yellow' or 'red')
        # is reported as problematic.
        problematic_indices = []
        for idx in health_response:
            if idx['health'] != 'green':
                problematic_indices.append({"index": idx['index'], "health": idx['health']})
        if not problematic_indices:
            print("All indices are in good health.")
            return (True, None)
        return (False, problematic_indices)
    except Exception as e:
        print(f"Error processing index health: {str(e)}")
        return (False, [])
================================================
FILE: ElasticSearch/legos/elasticsearch_list_allocations/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: ElasticSearch/legos/elasticsearch_list_allocations/__init__.py
================================================
================================================
FILE: ElasticSearch/legos/elasticsearch_list_allocations/elasticsearch_list_allocations.json
================================================
{
"action_title": "Elasticsearch List Allocations",
"action_description": "Elasticsearch List Allocations in a Cluster",
"action_type": "LEGO_TYPE_ELASTICSEARCH",
"action_entry_function": "elasticsearch_list_allocations",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_ES"]
}
================================================
FILE: ElasticSearch/legos/elasticsearch_list_allocations/elasticsearch_list_allocations.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import subprocess
from subprocess import PIPE
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # This action takes no inputs; the credential supplies the handle.
    pass
def elasticsearch_list_allocations_printer(output):
    """Print the allocation table returned by elasticsearch_list_allocations."""
    if output is not None:
        print(output)
def elasticsearch_list_allocations(handle) -> str:
    """elasticsearch_list_allocations lists the allocations of an Elasticsearch cluster .

       :type handle: object
       :param handle: Object returned from Task Validate

       :rtype: Result String of result
    """
    # _cat/allocation returns a human-readable table of shard counts and
    # disk usage per node.
    return handle.web_request("/_cat/allocation?v=true&pretty", # Path
                              "GET",                            # Method
                              None)                             # Data
================================================
FILE: ElasticSearch/legos/elasticsearch_list_nodes/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: ElasticSearch/legos/elasticsearch_list_nodes/__init__.py
================================================
================================================
FILE: ElasticSearch/legos/elasticsearch_list_nodes/elasticsearch_list_nodes.json
================================================
{
"action_title": "Elasticsearch List Nodes",
"action_description": "Elasticsearch List Nodes in a Cluster",
"action_type": "LEGO_TYPE_ELASTICSEARCH",
"action_entry_function": "elasticsearch_list_nodes",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_ES"]
}
================================================
FILE: ElasticSearch/legos/elasticsearch_list_nodes/elasticsearch_list_nodes.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import subprocess
from subprocess import PIPE
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # This action takes no inputs; the credential supplies the handle.
    pass
def elasticsearch_list_nodes_printer(output):
    """Print the node table returned by elasticsearch_list_nodes."""
    if output is not None:
        print(output)
def elasticsearch_list_nodes(handle) -> str:
    """elasticsearch_list_nodes lists the nodes of an Elasticsearch cluster .

       :type handle: object
       :param handle: Object returned from Task Validate

       :rtype: Result String of result
    """
    # _cat/nodes returns a human-readable table describing every cluster node.
    return handle.web_request("/_cat/nodes?v=true&pretty", # Path
                              "GET",                       # Method
                              None)                        # Data
================================================
FILE: ElasticSearch/legos/elasticsearch_search_query/README.md
================================================
## See it in Action

You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: ElasticSearch/legos/elasticsearch_search_query/__init__.py
================================================
================================================
FILE: ElasticSearch/legos/elasticsearch_search_query/elasticsearch_search_query.json
================================================
{
"action_title": "Elasticsearch search",
"action_description": "Elasticsearch Search",
"action_type": "LEGO_TYPE_ELASTICSEARCH",
"action_entry_function": "elasticsearch_search_query",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_ES"]
}
================================================
FILE: ElasticSearch/legos/elasticsearch_search_query/elasticsearch_search_query.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import json
from pydantic import BaseModel, Field
from typing import List, Dict
class InputSchema(BaseModel):
    # Required: the Lucene query string to execute.
    query: str = Field(
        title='Query',
        description='Query string in compact Lucene query syntax. For eg: foo:bar'
    )
    index: str = Field(
        '',
        title='Index',
        description='A comma-separated list of index names to search; use _all or empty string to perform the operation on all indices.'
    )
    # Fixed: the default was the string '100' for an int-typed field; use the
    # int 100 so the declared default matches the type (pydantic previously
    # relied on implicit coercion).
    size: int = Field(
        100,
        title='Number of hits to return.',
        description='The number of hits to return.'
    )
    sort: list = Field(
        None,
        title='List of fields to sort on.',
        description='Comma separated field names. For eg. [{"order_date":"desc"}, "order_id"]',
    )
    fields: List[str] = Field(
        None,
        title='List of fields to return.',
        description='Comma separated list of fields to return. For eg. ["customer_name", "order_id"]'
    )
def elasticsearch_search_query_printer(output):
    """Print each hit's document id and its JSON-encoded source document."""
    # Removed an enumerate() whose index variable was never used.
    for doc in output:
        print(f'DOC ID: {doc["_id"]}')
        print(json.dumps(doc["_source"]))
def elasticsearch_search_query(handle,
                               query: str,
                               index: str = '',
                               size: int = 100,
                               sort: List = None,
                               fields: List = None) -> List:
    """elasticsearch_search_query Runs an Elasticsearch search for the given query.

    :type handle: object
    :param handle: Object returned from Task Validate (an Elasticsearch client).
    :type query: str
    :param query: Query string in compact Lucene query syntax.
    :type index: str
    :param index: Optional comma-separated index list; empty string searches all indices.
    :type size: int
    :param size: Optional maximum number of hits to return.
    :type sort: List
    :param sort: Optional sort specification.
    :type fields: List
    :param fields: Optional list of source fields to include in each hit.
    :rtype: List of hit documents (the 'hits.hits' portion of the response).
    """
    # Fix: removed the dead `result = {}` pre-initialisation and corrected the
    # documented return type — this function returns a list of hits, not a dict.
    data = handle.search(
        query={"query_string": {"query": query}},
        index=index,
        size=size,
        sort=sort,
        _source=fields,
    )
    print(f"Got {data['hits']['total']['value']} Hits: ")
    return data['hits']['hits']
================================================
FILE: GCP/README.md
================================================
# GCP Actions
* [Add lifecycle policy to GCP storage bucket](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_add_lifecycle_policy_to_bucket/README.md): The action adds a lifecycle policy to a Google Cloud Platform (GCP) storage bucket.
* [GCP Add Member to IAM Role](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_add_member_to_iam_role/README.md): Adding member to the IAM role which already available
* [GCP Add Role to Service Account](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_add_role_to_service_account/README.md): Adding role and member to the service account
* [Create GCP Bucket](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_create_bucket/README.md): Create a new GCP bucket in the given location
* [Create a GCP disk snapshot](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_create_disk_snapshot/README.md): Create a GCP disk snapshot.
* [Create GCP Filestore Instance](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_create_filestore_instance/README.md): Create a new GCP Filestore Instance in the given location
* [Create GKE Cluster](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_create_gke_cluster/README.md): Create GKE Cluster
* [GCP Create Service Account](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_create_service_account/README.md): GCP Create Service Account
* [Delete GCP Bucket](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_delete_bucket/README.md): Delete a GCP bucket
* [Delete GCP Filestore Instance](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_delete_filestore_instance/README.md): Delete a GCP Filestore Instance in the given location
* [Delete an Object from GCP Bucket](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_delete_object_from_bucket/README.md): Delete an Object/Blob from a GCP Bucket
* [GCP Delete Service Account](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_delete_service_account/README.md): GCP Delete Service Account
* [GCP Describe a GKE cluster](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_describe_gke_cluster/README.md): GCP Describe a GKE cluster
* [Fetch Objects from GCP Bucket](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_fetch_objects_from_bucket/README.md): List all Objects in a GCP bucket
* [Get GCP storage buckets without lifecycle policies](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_get_buckets_without_lifecycle_policies/README.md): The action retrieves a list of Google Cloud Platform (GCP) storage buckets that do not have any lifecycle policies applied.
* [Get details of GCP forwarding rules](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_get_forwarding_rules_details/README.md): Get details of forwarding rules associated with a backend service.
* [Get GCP Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_get_handle/README.md): Get GCP Handle
* [Get List of GCP compute instance without label](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_get_instances_without_label/README.md): Get List of GCP compute instance without label
* [Get unused GCP backend services](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_get_unused_backend_services/README.md): Get unused backend service for an application load balancer that has no instances in its target group.
* [List all GCP Buckets](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_list_buckets/README.md): List all GCP buckets
* [Get GCP compute instances](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_list_compute_instances/README.md): Get GCP compute instances
* [Get List of GCP compute instance by label](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_list_compute_instances_by_label/README.md): Get List of GCP compute instance by label
* [Get list compute instance by VPC](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_list_compute_instances_by_vpc/README.md): Get list compute instance by VPC
* [GCP List GKE Cluster](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_list_gke_cluster/README.md): GCP List GKE Cluster
* [GCP List Nodes in GKE Cluster](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_list_nodes_in_gke_cluster/README.md): GCP List Nodes of GKE Cluster
* [List all Public GCP Buckets](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_list_public_buckets/README.md): List all publicly available GCP buckets
* [List GCP Secrets](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_list_secrets/README.md): List of your GCP Secrets
* [GCP List Service Accounts](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_list_service_accounts/README.md): GCP List Service Accounts
* [List all GCP VMs and if Publicly Accessible](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_list_vms_access/README.md): Lists all GCP buckets, and identifies those that are public.
* [GCP Remove Member from IAM Role](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_remove_member_from_iam_role/README.md): Remove member from the chosen IAM role.
* [GCP Remove Role from Service Account](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_remove_role_from_service_account/README.md): Remove role and member from the service account
* [Remove role from user](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_remove_user_role/README.md): GCP lego for removing a role from a user (default: 'viewer')
* [GCP Resize a GKE cluster](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_resize_gke_cluster/README.md): GCP Resize a GKE cluster by modifying nodes
* [GCP Restart compute instance](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_restart_compute_instances/README.md): GCP Restart compute instance
* [Restore GCP disk from a snapshot ](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_restore_disk_from_snapshot/README.md): Restore a GCP disk from a compute instance snapshot.
* [Save CSV to Google Sheets](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_save_csv_to_google_sheets_v1/README.md): Saves your CSV (see notes) into a prepared Google Sheet.
* [GCP Stop compute instance](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_stop_compute_instances/README.md): GCP Stop compute instance
* [Upload an Object to GCP Bucket](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/GCP/legos/gcp_upload_file_to_bucket/README.md): Upload an Object/Blob in a GCP bucket
================================================
FILE: GCP/__init__.py
================================================
================================================
FILE: GCP/legos/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_add_lifecycle_policy_to_bucket/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_add_lifecycle_policy_to_bucket/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_add_lifecycle_policy_to_bucket/gcp_add_lifecycle_policy_to_bucket.json
================================================
{
"action_title": "Add lifecycle policy to GCP storage bucket",
"action_description": "The action adds a lifecycle policy to a Google Cloud Platform (GCP) storage bucket.",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_add_lifecycle_policy_to_bucket",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" , "CATEGORY_TYPE_GCP", "CATEGORY_TYPE_GCP_STORAGE"]
}
================================================
FILE: GCP/legos/gcp_add_lifecycle_policy_to_bucket/gcp_add_lifecycle_policy_to_bucket.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from google.cloud import storage
class InputSchema(BaseModel):
    """Inputs for adding a delete-after-age lifecycle rule to a GCP bucket."""
    age: int = Field(
        default=3,
        title='Age (in days)',
        description='Age (in days) of bucket to add to lifecycle policy.',
    )
    bucket_name: str = Field(
        title='Bucket Name',
        description='GCP storage bucket name.',
    )
def gcp_add_lifecycle_policy_to_bucket_printer(output):
    """Print the action's confirmation message, skipping None output."""
    if output is not None:
        print(output)
def gcp_add_lifecycle_policy_to_bucket(handle, bucket_name:str, age:int) -> str:
    """gcp_add_lifecycle_policy_to_bucket Adds a delete-after-age lifecycle rule to a storage bucket.

    :type handle: object
    :param handle: Object returned from Task Validate (GCP credentials).
    :type age: int
    :param age: Age (in days) after which objects are deleted by the new rule.
    :type bucket_name: string
    :param bucket_name: GCP storage bucket name.
    :rtype: String confirming the lifecycle policy was added.
    """
    storage_client = storage.Client(credentials=handle)
    bucket = storage_client.get_bucket(bucket_name)
    # Fix: removed the pointless `except Exception as e: raise e` wrapper;
    # client errors now propagate unchanged to the caller.
    bucket.add_lifecycle_delete_rule(age=age)
    # patch() persists the locally-added rule to the bucket's metadata.
    bucket.patch()
    return f"Added lifecycle policy to {bucket.name} which will delete object after {age} days of creation."
================================================
FILE: GCP/legos/gcp_add_member_to_iam_role/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_add_member_to_iam_role/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_add_member_to_iam_role/gcp_add_member_to_iam_role.json
================================================
{
"action_title": "GCP Add Member to IAM Role",
"action_description": "Adding member to the IAM role which already available",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_add_member_to_iam_role",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_IAM" ]
}
================================================
FILE: GCP/legos/gcp_add_member_to_iam_role/gcp_add_member_to_iam_role.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
import pprint
from typing import List,Any, Dict
from googleapiclient import discovery
class InputSchema(BaseModel):
    """Inputs for adding a member to an existing project-level IAM role."""
    project_id: str = Field(
        title = "Project ID",
        description = "Name of the project e.g unskript-dev"
    )
    role: str = Field(
        title = "Role Name",
        description = "Permission name assign to member e.g iam.serviceAccountUser"
    )
    member_email: str = Field(
        title = "Member Email",
        description = "Member email which has GCP access e.g test@company.com"
    )
    # Fix: default to 1 so the field is optional here, matching the entry
    # function's `version: int = 1` default (it was previously required).
    version: int = Field(
        1,
        title = "Requested Policy Version",
        description = "Requested Policy Version"
    )
def gcp_add_member_to_iam_role_printer(output):
    """Pretty-print the updated IAM policy, skipping None output."""
    if output is not None:
        pprint.pprint(output)
def gcp_add_member_to_iam_role(handle, project_id: str, role: str, member_email:str, version:int = 1) -> Dict:
    """gcp_add_member_to_iam_role Adds a member to a project-level IAM role binding.

    :type handle: object
    :param handle: Object returned from Task Validate (GCP credentials).
    :type project_id: string
    :param project_id: Name of the project
    :type role: string
    :param role: Permission name assign to member e.g iam.serviceAccountUser
    :type member_email: string
    :param member_email: Member email which has GCP access e.g test@company.com
    :type version: int
    :param version: Requested Policy Version
    :rtype: Dict of policy details returned by setIamPolicy
    """
    # Fix: removed the pointless try/`raise error` wrapper — API errors now
    # propagate unchanged. Also documented the previously-missing handle param.
    service = discovery.build(
        "cloudresourcemanager", "v1", credentials=handle)
    get_policy = (
        service.projects().getIamPolicy(
            resource=project_id,
            body={"options": {"requestedPolicyVersion": version}}).execute())
    # Service accounts need the 'serviceAccount:' prefix; human users 'user:'.
    member = "user:" + member_email
    if "gserviceaccount" in member_email:
        member = "serviceAccount:" + member_email
    get_role = "roles/" + role
    # Append to the existing binding for this role, or create a new one.
    binding = None
    for b in get_policy["bindings"]:
        if b["role"] == get_role:
            binding = b
            break
    if binding is not None:
        binding["members"].append(member)
    else:
        get_policy["bindings"].append({"role": get_role, "members": [member]})
    return (
        service.projects()
        .setIamPolicy(resource=project_id, body={"policy": get_policy}).execute())
================================================
FILE: GCP/legos/gcp_add_role_to_service_account/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_add_role_to_service_account/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_add_role_to_service_account/gcp_add_role_to_service_account.json
================================================
{
"action_title": "GCP Add Role to Service Account",
"action_description": "Adding role and member to the service account",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_add_role_to_service_account",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_IAM" ]
}
================================================
FILE: GCP/legos/gcp_add_role_to_service_account/gcp_add_role_to_service_account.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
import pprint
from typing import List,Any, Dict
from googleapiclient import discovery
class InputSchema(BaseModel):
    """Inputs for adding a role binding to a service account's IAM policy."""
    project_id: str = Field(
        title = "Project ID",
        description = "Name of the project e.g unskript-dev"
    )
    # Fix: this is the *add* action — the description was copy-pasted from the
    # remove action and talked about removing the member.
    role: str = Field(
        title = "Role Name",
        description = "Role name to assign to the member e.g iam.serviceAccountUser"
    )
    member_email: str = Field(
        title = "Member Email",
        description = "Member email which has GCP access e.g test@company.com"
    )
    sa_id: str = Field(
        title = "Service Account Email",
        description = "Service Account email id e.g test-user@unskript-dev.iam.gserviceaccount.com"
    )
def gcp_add_role_to_service_account_printer(output):
    """Pretty-print the updated service-account policy, skipping None output."""
    if output is not None:
        pprint.pprint(output)
def gcp_add_role_to_service_account(handle, project_id: str, role: str, member_email:str, sa_id:str) -> Dict:
    """gcp_add_role_to_service_account Adds a role binding for a member to a service account's IAM policy.

    :type handle: object
    :param handle: Object returned from Task Validate (GCP credentials).
    :type project_id: string
    :param project_id: Name of the project
    :type role: string
    :param role: Role name to grant e.g iam.serviceAccountUser
    :type member_email: string
    :param member_email: Member email which has GCP access e.g test@company.com
    :type sa_id: string
    :param sa_id: Service Account email
    :rtype: Dict of new policy details returned by setIamPolicy
    """
    # Fix: removed the pointless try/`raise error` wrapper — API errors now
    # propagate unchanged to the caller.
    service = discovery.build('iam', 'v1', credentials=handle)
    resource = f'projects/{project_id}/serviceAccounts/{sa_id}'
    response = service.projects().serviceAccounts().getIamPolicy(resource=resource).execute()
    # Service accounts need the 'serviceAccount:' prefix; human users 'user:'.
    member = "user:" + member_email
    if "gserviceaccount" in member_email:
        member = "serviceAccount:" + member_email
    get_role = "roles/" + role
    if "bindings" not in response:
        # No bindings yet: build a fresh minimal policy holding just this binding.
        response = {'version': 1,
                    'bindings': [{'role': get_role,
                                  'members': [member]}]}
    else:
        response["bindings"].append({"role": get_role, "members": [member]})
    set_policy = service.projects().serviceAccounts().setIamPolicy(
        resource=resource, body={"policy": response})
    return set_policy.execute()
================================================
FILE: GCP/legos/gcp_create_bucket/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_create_bucket/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_create_bucket/gcp_create_bucket.json
================================================
{
"action_title": "Create GCP Bucket",
"action_description": "Create a new GCP bucket in the given location",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_create_bucket",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["create"],
"action_nouns": ["bucket","gcp"],
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_BUCKET" ]
}
================================================
FILE: GCP/legos/gcp_create_bucket/gcp_create_bucket.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from beartype import beartype
import pprint
from typing import List,Any, Dict
from google.cloud import storage
class InputSchema(BaseModel):
    """Inputs for creating a new GCP storage bucket."""
    bucket_name: str = Field(
        description = "Name of the bucket to be created",
        title = "Bucket Name",
    )
    project_name: str = Field(
        default = '',
        description = "GCP Project Name",
        title = "GCP Project",
    )
    storage_class: str = Field(
        default = 'STANDARD',
        description = "Storage class to be assigned to the new bucket. Eg- STANDARD, COLDLINE",
        title = "Storage Class",
    )
    location: str = Field(
        default = 'us',
        description = "GCP location where bucket should be created. Eg- US",
        title = "Location",
    )
def gcp_create_bucket_printer(output):
    """Print a one-line summary of the newly created bucket."""
    if output is None:
        return
    # Fix: the storage-class placeholder previously printed output['location']
    # a second time instead of output['storage_class'].
    print(f"Created bucket {output['name']} in {output['location']} with storage class {output['storage_class']}")
def gcp_create_bucket(handle, bucket_name: str, location: str, project_name: str,storage_class: str) -> Dict:
    """gcp_create_bucket Creates a new GCP bucket and returns its details.

    :type handle: object
    :param handle: Object returned from Task Validate (GCP credentials).
    :type bucket_name: string
    :param bucket_name: Name of the bucket to be created
    :type location: string
    :param location: GCP location where bucket should be created
    :type project_name: string
    :param project_name: GCP Project Name
    :type storage_class: string
    :param storage_class: Storage class to be assigned to the new bucket
    :rtype: Dict of Bucket Details (name, location, storage_class)
    """
    # Fix: removed the pointless `except Exception as e: raise e` wrapper and
    # the dead pre-initialisation of the result dict.
    storage_client = storage.Client(credentials=handle)
    bucket = storage_client.bucket(bucket_name)
    bucket.storage_class = storage_class
    new_bucket = storage_client.create_bucket(bucket, location=location, project=project_name)
    # Return only the fields the companion printer consumes.
    return {
        "name": new_bucket.name,
        "location": new_bucket.location,
        "storage_class": new_bucket.storage_class,
    }
================================================
FILE: GCP/legos/gcp_create_disk_snapshot/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_create_disk_snapshot/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_create_disk_snapshot/gcp_create_disk_snapshot.json
================================================
{
"action_title": "Create a GCP disk snapshot",
"action_description": "Create a GCP disk snapshot.",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_create_disk_snapshot",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_VM"]
}
================================================
FILE: GCP/legos/gcp_create_disk_snapshot/gcp_create_disk_snapshot.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Dict, Optional
from pydantic import BaseModel, Field
from google.cloud.compute_v1.services.disks import DisksClient
from google.cloud.compute_v1.types import Snapshot
class InputSchema(BaseModel):
    """Inputs for creating a snapshot of a GCP persistent disk."""
    project: str = Field(
        ...,
        title='GCP Project',
        description='GCP Project Name',
    )
    zone: str = Field(
        ...,
        title='Zone',
        description='GCP Zone where instance list should be gotten from',
    )
    disk: str = Field(
        ...,
        title='Disk name',
        description='The name of the disk to create a snapshot of.',
    )
    snapshot_name: str = Field(
        default='',
        title='Snapshot name',
        description='The name of the snapshot to create. If not provided, a name will be automatically generated.',
    )
def gcp_create_disk_snapshot_printer(output):
    """Print the snapshot-creation confirmation, skipping None output."""
    if output is not None:
        print(output)
def gcp_create_disk_snapshot(handle, project: str, zone:str, disk: str, snapshot_name: str) -> str:
    """gcp_create_disk_snapshot Creates a disk snapshot and returns a confirmation string.

    :type handle: object
    :param handle: Object returned from Task Validate (GCP credentials).
    :type project: string
    :param project: Google Cloud Platform Project
    :type zone: string
    :param zone: Zone in which the disk resides.
    :type disk: string
    :param disk: The name of the disk to create a snapshot of.
    :type snapshot_name: string
    :param snapshot_name: The name of the snapshot to create. If not provided, a name will be automatically generated.
    :rtype: String of snapshot creation confirmation
    """
    # Fix: removed the pointless `except Exception as e: raise e` wrapper;
    # client errors propagate unchanged to the caller.
    disks_client = DisksClient(credentials=handle)
    snapshot = Snapshot(name=snapshot_name)
    disks_client.create_snapshot(
        project=project, zone=zone, disk=disk, snapshot_resource=snapshot
    )
    return f"Snapshot {snapshot_name} created."
================================================
FILE: GCP/legos/gcp_create_filestore_instance/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_create_filestore_instance/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_create_filestore_instance/gcp_create_filestore_instance.json
================================================
{
"action_title": "Create GCP Filestore Instance",
"action_description": "Create a new GCP Filestore Instance in the given location",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_create_filestore_instance",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["create"],
"action_nouns": ["filestore","instance","gcp"],
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_FILE_STORE" ]
}
================================================
FILE: GCP/legos/gcp_create_filestore_instance/gcp_create_filestore_instance.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from google.cloud import filestore_v1
from google.protobuf.json_format import MessageToDict
from typing import List, Dict
import pprint
class InputSchema(BaseModel):
    """Inputs for creating a new GCP Filestore instance."""
    instance_id: str = Field(
        description = "Name of the instance to create",
        title = "Instance ID",
    )
    project_name: str = Field(
        description = "GCP Project Name",
        title = "GCP Project Name(ID)",
    )
    location: str = Field(
        description = "GCP locations map to GCP zones Eg: us-west1-b",
        title = "Location",
    )
    network: str = Field(
        default = 'default',
        description = "Name of the Google Compute Engine VPC network",
        title = "Network",
    )
    description: str = Field(
        default = '',
        max_length = 2048,
        description = "Description of the instance (2048 characters or less)",
        title = "Description",
    )
    name: str = Field(
        description = "Resource name of the instance",
        title = "Name",
    )
    capacity: int = Field(
        description = "File share capacity in gigabytes (GB). Eg: 1024 ",
        title = "Capacity",
    )
    tier: str = Field(
        description = "Service tier for instance Eg: STANDARD",
        title = "Tier",
    )
def gcp_create_filestore_instance_printer(output):
    """Pretty-print the Filestore instance details, skipping None output."""
    if output is not None:
        pprint.pprint(output)
def gcp_create_filestore_instance(handle, instance_id:str, project_name:str, location:str, network:str, tier:str, description:str, name:str, capacity:int ) -> Dict:
    """gcp_create_filestore_instance Creates a Filestore instance and returns its details.

    :type handle: object
    :param handle: Object returned from Task Validate (GCP credentials).
    :type instance_id: string
    :param instance_id: Name of the instance to create
    :type project_name: string
    :param project_name: GCP Project Name
    :type location: string
    :param location: GCP locations map to GCP zones Eg: us-west1-b
    :type network: string
    :param network: Name of the Google Compute Engine VPC network
    :type tier: string
    :param tier: Service tier for instance Eg: STANDARD
    :type description: string
    :param description: Description of the instance (2048 characters or less)
    :type name: string
    :param name: Resource name of the instance
    :type capacity: int
    :param capacity: File share capacity in gigabytes (GB). Eg: 1024
    :rtype: Dict of Filestore Instance Details
    """
    # Fix: removed the pointless `except Exception as e: raise e` wrapper and
    # reformatted the single-line instance dict for readability.
    instance_details_dict = {
        "networks": [{"network": network, "modes": ["MODE_IPV4"]}],
        "tier": tier.upper(),
        "description": description,
        "file_shares": [{"name": name, "capacity_gb": capacity}],
    }
    parent_path = "projects/" + project_name + "/locations/" + location
    client = filestore_v1.CloudFilestoreManagerClient(credentials=handle)
    request = filestore_v1.CreateInstanceRequest(
        parent=parent_path, instance=instance_details_dict, instance_id=instance_id
    )
    # create_instance is a long-running operation; block until it finishes.
    operation = client.create_instance(request=request)
    print("Waiting for operation to complete...")
    response = operation.result()
    # MessageToDict needs the underlying protobuf message (response._pb).
    return MessageToDict(response._pb)
================================================
FILE: GCP/legos/gcp_create_gke_cluster/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_create_gke_cluster/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_create_gke_cluster/gcp_create_gke_cluster.json
================================================
{
"action_title": "Create GKE Cluster",
"action_description": "Create GKE Cluster",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_create_gke_cluster",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_GKE" ]
}
================================================
FILE: GCP/legos/gcp_create_gke_cluster/gcp_create_gke_cluster.py
================================================
import pprint
from typing import List, Dict
from pydantic import BaseModel, Field
from google.cloud import container_v1
from google.protobuf.json_format import MessageToDict
class InputSchema(BaseModel):
    """Inputs for creating a GKE cluster."""
    project_id: str = Field(
        title = "GCP Project",
        description = "GCP Project Name"
    )
    zone: str = Field(
        title = "Zone",
        description = "GCP Zone where instance list should be gotten from"
    )
    cluster_name: str = Field(
        title = "Cluster Name",
        description = "Name of the GKE cluster."
    )
    # Fix: the entry function takes node_count as an int; this field was
    # declared str, coercing the numeric count to a string.
    node_count: int = Field(
        title = "Initial Node Count",
        description = "Node count of GKE cluster."
    )
def gcp_create_gke_cluster_printer(output):
    """Pretty-print the cluster-creation response, skipping empty/None output."""
    # Fix: use a truthiness check so None is tolerated like an empty dict —
    # the previous `len(output) == 0` raised TypeError for None, unlike the
    # sibling printers in this package.
    if not output:
        return
    pprint.pprint(output)
def gcp_create_gke_cluster(handle, project_id: str, zone: str, cluster_name: str, node_count: int) -> Dict:
    """gcp_create_gke_cluster Creates a GKE cluster and returns the operation info.

    :type handle: object
    :param handle: Object returned from Task Validate (GCP credentials).
    :type project_id: string
    :param project_id: Google Cloud Platform Project
    :type zone: string
    :param zone: Zone in which the cluster should be created.
    :type cluster_name: string
    :param cluster_name: Name of the GKE cluster.
    :type node_count: int
    :param node_count: Node count of GKE cluster.
    :rtype: Dict of cluster info
    """
    # Fix: removed the pointless try/`raise error` wrapper; API errors now
    # propagate unchanged to the caller.
    client = container_v1.ClusterManagerClient(credentials=handle)
    res = client.create_cluster(
        project_id=project_id,
        zone=zone,
        cluster={'name': cluster_name,
                 'initial_node_count': node_count})
    # Convert the protobuf response to a plain dict for the caller/printer.
    return MessageToDict(res._pb)
================================================
FILE: GCP/legos/gcp_create_service_account/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_create_service_account/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_create_service_account/gcp_create_service_account.json
================================================
{
"action_title": "GCP Create Service Account",
"action_description": "GCP Create Service Account",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_create_service_account",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_IAM" ]
}
================================================
FILE: GCP/legos/gcp_create_service_account/gcp_create_service_account.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
import pprint
from typing import List,Any, Dict
import googleapiclient.discovery
class InputSchema(BaseModel):
    # Inputs for creating a service account in a project.
    project_id: str = Field(
        title="Project ID",
        description="Name of the project e.g unskript-dev",
    )
    accountId: str = Field(
        title="Account ID",
        description="Name for the service account e.g test-account",
    )
    display_name: str = Field(
        title="Display Name",
        description="Display Name for the service account e.g test-account",
    )
def gcp_create_service_account_printer(output):
    """Pretty-print the created service account details; no-op on None."""
    if output is not None:
        pprint.pprint(output)
def gcp_create_service_account(handle, project_id: str, accountId: str, display_name: str) -> Dict:
    """gcp_create_service_account creates a service account and returns its details.

    :type handle: object
    :param handle: GCP credentials object returned from Task Validate

    :type project_id: string
    :param project_id: Name of the project

    :type accountId: string
    :param accountId: Name for the service account

    :type display_name: string
    :param display_name: Display Name for the service account

    :rtype: Dict of details of the created service account
    """
    # Fixed: the original had a second, dead docstring statement after the real
    # one and a try/except that only re-raised; both removed.
    service = googleapiclient.discovery.build(
        'iam', 'v1', credentials=handle)
    # The IAM API addresses the parent project as "projects/<project_id>".
    response = service.projects().serviceAccounts().create(
        name='projects/' + project_id,
        body={
            'accountId': accountId,
            'serviceAccount': {
                'displayName': display_name
            }}).execute()
    return response
================================================
FILE: GCP/legos/gcp_delete_bucket/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_delete_bucket/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_delete_bucket/gcp_delete_bucket.json
================================================
{
"action_title": "Delete GCP Bucket",
"action_description": "Delete a GCP bucket",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_delete_bucket",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["delete"],
"action_nouns": ["bucket","gcp"],
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_BUCKET" ]
}
================================================
FILE: GCP/legos/gcp_delete_bucket/gcp_delete_bucket.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from beartype import beartype
import pprint
from typing import List,Any, Dict
from google.cloud import storage
class InputSchema(BaseModel):
    # Single input: the bucket this action removes.
    bucket_name: str = Field(
        title="Bucket Name",
        description="Name of the bucket to be deleted",
    )
def gcp_delete_bucket_printer(output):
    """Print a confirmation for the deleted bucket; no-op on None."""
    if output is not None:
        print(f"Bucket {output['deleted_bucket']} deleted")
def gcp_delete_bucket(handle, bucket_name: str) -> Dict:
    """gcp_delete_bucket deletes a GCP bucket and returns its name.

    :type handle: object
    :param handle: GCP credentials object returned from Task Validate

    :type bucket_name: string
    :param bucket_name: Name of the bucket to be deleted

    :rtype: Dict of Bucket Details
    """
    storage_client = storage.Client(credentials=handle)
    bucket = storage_client.get_bucket(bucket_name)
    # NOTE(review): delete() raises for a non-empty bucket; any API error
    # propagates to the caller unchanged (the old try/re-raise added nothing).
    bucket.delete()
    return {"deleted_bucket": bucket.name}
================================================
FILE: GCP/legos/gcp_delete_filestore_instance/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_delete_filestore_instance/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_delete_filestore_instance/gcp_delete_filestore_instance.json
================================================
{
"action_title": "Delete GCP Filestore Instance",
"action_description": "Delete a GCP Filestore Instance in the given location",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_delete_filestore_instance",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["delete"],
"action_nouns": ["filestore","gcp"],
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_FILE_STORE" ]
}
================================================
FILE: GCP/legos/gcp_delete_filestore_instance/gcp_delete_filestore_instance.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from google.cloud import filestore_v1
class InputSchema(BaseModel):
    # Inputs identifying the Filestore instance to delete.
    project_name: str = Field(
        title="GCP Project Name(ID)",
        description="GCP Project Name",
    )
    location: str = Field(
        title="Location",
        description="GCP locations map to GCP zones Eg: us-west1-b",
    )
    instance_id: str = Field(
        title="Instance ID",
        description="Name of the instance to be deleted",
    )
def gcp_delete_filestore_instance_printer(output):
    """Pretty-print the deletion status; no-op on None."""
    if output is not None:
        pprint.pprint(output)
def gcp_delete_filestore_instance(handle, instance_id: str, project_name: str, location: str) -> Dict:
    """gcp_delete_filestore_instance deletes a Filestore instance and returns the status.

    :type handle: object
    :param handle: GCP credentials object returned from Task Validate

    :type instance_id: string
    :param instance_id: Name of the instance to be deleted

    :type project_name: string
    :param project_name: GCP Project Name

    :type location: string
    :param location: GCP locations map to GCP zones Eg: us-west1-b

    :rtype: Status of Deleted Filestore Instance
    """
    # Fixed: the docstring previously said "instance to create".
    client = filestore_v1.CloudFilestoreManagerClient(credentials=handle)
    name = f"projects/{project_name}/locations/{location}/instances/{instance_id}"
    request = filestore_v1.DeleteInstanceRequest(name=name)
    # delete_instance returns a long-running operation; block on result() so
    # a failure surfaces here rather than after the action reports success.
    operation = client.delete_instance(request=request)
    print("Waiting for operation to complete...")
    operation.result()
    return {"Message": "Filestore Instance deleted"}
================================================
FILE: GCP/legos/gcp_delete_object_from_bucket/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_delete_object_from_bucket/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_delete_object_from_bucket/gcp_delete_object_from_bucket.json
================================================
{
"action_title": "Delete an Object from GCP Bucket",
"action_description": "Delete an Object/Blob from a GCP Bucket",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_delete_object_from_bucket",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["delete"],
"action_nouns": ["object","bucket","gcp"],
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_BUCKET" ]
}
================================================
FILE: GCP/legos/gcp_delete_object_from_bucket/gcp_delete_object_from_bucket.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Dict
from google.cloud import storage
class InputSchema(BaseModel):
    # Inputs naming the blob and the bucket that contains it.
    blob_name: str = Field(
        title="Blob Name",
        description="Name of the object/blob to be deleted",
    )
    bucket_name: str = Field(
        title="Bucket Name",
        description="Name of the bucket to delete object/blob from",
    )
def gcp_delete_object_from_bucket_printer(output):
    """Confirm the blob deletion; no-op on None."""
    if output is not None:
        print(f"Successfully deleted {output['blob_name']}")
def gcp_delete_object_from_bucket(handle, blob_name: str, bucket_name: str) -> Dict:
    """gcp_delete_object_from_bucket deletes an object in a GCP Bucket

    :type handle: object
    :param handle: GCP credentials object returned from Task Validate

    :type blob_name: string
    :param blob_name: Name of the object/blob to be deleted

    :type bucket_name: string
    :param bucket_name: Name of the bucket to delete object/blob from

    :rtype: Dict of deleted blob
    """
    # Fixed: the docstring previously documented "bucket_name" twice and
    # never documented blob_name.
    storage_client = storage.Client(credentials=handle)
    bucket = storage_client.get_bucket(bucket_name)
    # bucket.blob() only builds a reference; delete() performs the API call.
    bucket.blob(blob_name).delete()
    return {"blob_name": blob_name}
================================================
FILE: GCP/legos/gcp_delete_service_account/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_delete_service_account/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_delete_service_account/gcp_delete_service_account.json
================================================
{
"action_title": "GCP Delete Service Account",
"action_description": "GCP Delete Service Account",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_delete_service_account",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_IAM" ]
}
================================================
FILE: GCP/legos/gcp_delete_service_account/gcp_delete_service_account.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel, Field
import pprint
from typing import Dict
from googleapiclient import discovery
class InputSchema(BaseModel):
    # The service account is addressed by its email.
    sa_id: str = Field(
        title="Service Account Email",
        description="Email of the service account",
    )
def gcp_delete_service_account_printer(output):
    """Pretty-print the delete response; no-op on None."""
    if output is not None:
        pprint.pprint(output)
def gcp_delete_service_account(handle, sa_id: str) -> Dict:
    """gcp_delete_service_account Returns a Dict of success details for the deleted service account

    :type handle: object
    :param handle: GCP credentials object returned from Task Validate

    :type sa_id: string
    :param sa_id: Email of the service account

    :rtype: Dict (empty on success — the IAM delete call returns no body)
    """
    service = discovery.build(
        'iam', 'v1', credentials=handle)
    # "projects/-" is the IAM wildcard: the project is resolved from the
    # service account email itself.
    return service.projects().serviceAccounts().delete(
        name='projects/-/serviceAccounts/' + sa_id).execute()
================================================
FILE: GCP/legos/gcp_describe_gke_cluster/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_describe_gke_cluster/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_describe_gke_cluster/gcp_describe_gke_cluster.json
================================================
{
"action_title": "GCP Describe a GKE cluster",
"action_description": "GCP Describe a GKE cluster",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_describe_gke_cluster",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_GKE" ]
}
================================================
FILE: GCP/legos/gcp_describe_gke_cluster/gcp_describe_gke_cluster.py
================================================
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from google.cloud import container_v1
from google.protobuf.json_format import MessageToDict
class InputSchema(BaseModel):
    # Identifies the GKE cluster to describe.
    project_id: str = Field(
        title="GCP Project",
        description="GCP Project Name",
    )
    zone: str = Field(
        title="Zone",
        description="GCP Zone where instance list should be gotten from",
    )
    cluster_name: str = Field(
        title="Cluster Name",
        description="Name of the GKE cluster.",
    )
def gcp_describe_gke_cluster_printer(output):
    """Pretty-print the cluster details; no-op when the dict is empty."""
    if len(output) != 0:
        pprint.pprint(output)
def gcp_describe_gke_cluster(handle, project_id: str, zone: str, cluster_name: str) -> Dict:
    """gcp_describe_gke_cluster Returns the dict of cluster details

    :type handle: object
    :param handle: GCP credentials object returned from Task Validate

    :type project_id: string
    :param project_id: Google Cloud Platform Project

    :type zone: string
    :param zone: Zone to which the cluster in the project should be fetched.

    :type cluster_name: string
    :param cluster_name: Name of the GKE cluster.

    :rtype: Dict of cluster details
    """
    # Create a client
    client = container_v1.ClusterManagerClient(credentials=handle)
    name = f'projects/{project_id}/locations/{zone}/clusters/{cluster_name}'
    res = client.get_cluster(name=name)
    response = {
        'Name': cluster_name,
        'CurrentNodeCount': res.current_node_count,
        'NodePoolsCount': len(res.node_pools),
        'NodePoolDetails': [],
    }
    for node_pool in res.node_pools:
        response['NodePoolDetails'].append({
            'Name': node_pool.name,
            'NodeCount': node_pool.initial_node_count,
            # BUG FIX: this previously copied initial_node_count again; the
            # machine type lives on the node pool's node config.
            'MachineType': node_pool.config.machine_type,
            'AutoscalingEnabled': node_pool.autoscaling.enabled,
            'MinNodes': node_pool.autoscaling.min_node_count,
            'MaxNodes': node_pool.autoscaling.max_node_count,
        })
    return response
================================================
FILE: GCP/legos/gcp_fetch_objects_from_bucket/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_fetch_objects_from_bucket/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_fetch_objects_from_bucket/gcp_fetch_objects_from_bucket.json
================================================
{
"action_title": "Fetch Objects from GCP Bucket",
"action_description": "List all Objects in a GCP bucket",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_fetch_objects_from_bucket",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["list"],
"action_nouns": ["objects","bucket","gcp"],
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_BUCKET" ]
}
================================================
FILE: GCP/legos/gcp_fetch_objects_from_bucket/gcp_fetch_objects_from_bucket.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List
from pydantic import BaseModel, Field
from google.cloud import storage
class InputSchema(BaseModel):
    bucket_name: str = Field(
        title="Bucket Name",
        # Fixed copy-paste: this action lists objects, it does not delete the
        # bucket, so the UI description must not say "to be deleted".
        description="Name of the bucket to fetch objects/blobs from",
    )
def gcp_fetch_objects_from_bucket_printer(output):
    """Print each object, or a notice when the bucket holds none."""
    if not output:
        print("Bucket is empty")
        return
    for blob in output:
        print(blob)
def gcp_fetch_objects_from_bucket(handle, bucket_name: str) -> List:
    """gcp_fetch_objects_from_bucket returns a List of objects in the Bucket

    :type handle: object
    :param handle: GCP credentials object returned from Task Validate

    :type bucket_name: string
    :param bucket_name: Name of the bucket to fetch objects/blobs from

    :rtype: List of Bucket Objects
    """
    storage_client = storage.Client(credentials=handle)
    bucket = storage_client.get_bucket(bucket_name)
    # Materialize the blob iterator into a plain list for the caller.
    return list(bucket.list_blobs())
================================================
FILE: GCP/legos/gcp_get_buckets_without_lifecycle_policies/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_get_buckets_without_lifecycle_policies/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_get_buckets_without_lifecycle_policies/gcp_get_buckets_without_lifecycle_policies.json
================================================
{
"action_title": "Get GCP storage buckets without lifecycle policies",
"action_description": "The action retrieves a list of Google Cloud Platform (GCP) storage buckets that do not have any lifecycle policies applied.",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_get_buckets_without_lifecycle_policies",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" , "CATEGORY_TYPE_GCP", "CATEGORY_TYPE_GCP_STORAGE"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: GCP/legos/gcp_get_buckets_without_lifecycle_policies/gcp_get_buckets_without_lifecycle_policies.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import List, Tuple
from pydantic import BaseModel, Field
from google.cloud import storage
class InputSchema(BaseModel):
    # This check takes no inputs beyond the GCP credential handle.
    pass
def gcp_get_buckets_without_lifecycle_policies_printer(output):
    """Print the check result; no-op on None."""
    if output is not None:
        print(output)
def gcp_get_buckets_without_lifecycle_policies(handle) -> Tuple:
    """gcp_get_buckets_without_lifecycle_policies Returns the List of GCP storage buckets without lifecycle policies

    :type handle: object
    :param handle: Object returned from Task Validate

    :rtype: Tuple of storage buckets without lifecycle policies and the corresponding status.
    """
    storage_client = storage.Client(credentials=handle)
    # A bucket fails the check when its lifecycle-rule iterator is empty.
    offenders = [
        bucket.name
        for bucket in storage_client.list_buckets()
        if not list(bucket.lifecycle_rules)
    ]
    # (True, None) signals a passing check: every bucket has a policy.
    if offenders:
        return (False, offenders)
    return (True, None)
================================================
FILE: GCP/legos/gcp_get_forwarding_rules_details/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_get_forwarding_rules_details/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_get_forwarding_rules_details/gcp_get_forwarding_rules_details.json
================================================
{
"action_title": "Get details of GCP forwarding rules",
"action_description": "Get details of forwarding rules associated with a backend service.",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_get_forwarding_rules_details",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" , "CATEGORY_TYPE_GCP", "CATEGORY_TYPE_GCP_VM"]
}
================================================
FILE: GCP/legos/gcp_get_forwarding_rules_details/gcp_get_forwarding_rules_details.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import List, Optional
from pydantic import BaseModel, Field
from google.cloud import compute_v1
class InputSchema(BaseModel):
    # GCP project whose forwarding rules are inspected.
    project: str = Field(
        ...,
        title='Project ID',
        description='GCP project ID',
    )
def gcp_get_forwarding_rules_details_printer(output):
    """Print the collected forwarding-rule details; no-op on None."""
    if output is not None:
        print(output)
def get_backend_services(project, handle):
    """Map every backend service's self_link URL to its name for quick lookup."""
    services = compute_v1.BackendServicesClient(credentials=handle).list(project=project)
    return {svc.self_link: svc.name for svc in services}
def get_target_proxy(forwarding_rule, project, handle):
    """Resolve a forwarding rule's target URL into its HTTP(S) proxy object.

    Raises Exception when the target is neither a targetHttpProxies nor a
    targetHttpsProxies URL.
    """
    # Fixed: removed the unused target_http_proxy / target_https_proxy locals.
    # The proxy name is the last path segment of the target URL.
    proxy_name = forwarding_rule.target.split('/')[-1]
    if 'targetHttpProxies' in forwarding_rule.target:
        return compute_v1.TargetHttpProxiesClient(credentials=handle).get(
            project=project,
            target_http_proxy=proxy_name,
        )
    if 'targetHttpsProxies' in forwarding_rule.target:
        return compute_v1.TargetHttpsProxiesClient(credentials=handle).get(
            project=project,
            target_https_proxy=proxy_name,
        )
    raise Exception('Unsupported target proxy type')
def gcp_get_forwarding_rules_details(handle, project: str) -> List:
    """gcp_get_forwarding_rules_details Returns the List of forwarding rules, its path and the associated backend service.

    :type handle: object
    :param handle: GCP credentials object returned from Task Validate

    :type project: string
    :param project: Google Cloud Platform Project

    :rtype: List of forwarding rules, their paths and the associated backend services.
    """
    client = compute_v1.GlobalForwardingRulesClient(credentials=handle)
    backend_services = get_backend_services(project, handle)
    result = []
    # Walk each global forwarding rule: rule -> target proxy -> URL map -> path rules.
    for forwarding_rule in client.list(project=project):
        target_proxy = get_target_proxy(forwarding_rule, project, handle)
        # get the associated URL map
        url_map = compute_v1.UrlMapsClient(credentials=handle).get(
            project=project, url_map=target_proxy.url_map.split('/')[-1])
        # Record every path rule whose service is a known backend service
        # (single dict lookup instead of the duplicated .get() of before).
        for path_matcher in url_map.path_matchers:
            for path_rule in path_matcher.path_rules:
                service_name = backend_services.get(path_rule.service)
                if service_name:
                    result.append({
                        "forwarding_rule_name": forwarding_rule.name,
                        "backend_service": service_name,
                        "path": path_rule.paths,
                    })
    return result
================================================
FILE: GCP/legos/gcp_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_get_handle/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_get_handle/gcp_get_handle.json
================================================
{
"action_title": "Get GCP Handle",
"action_description": "Get GCP Handle",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false
}
================================================
FILE: GCP/legos/gcp_get_handle/gcp_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No inputs: this action only hands back the credential object.
    pass
def gcp_get_handle(handle):
    """gcp_get_handle returns the GCP handle.

    :type handle: object
    :param handle: Object returned from Task Validate

    :rtype: GCP Handle.
    """
    # Identity: expose the validated credential object to downstream cells.
    return handle
================================================
FILE: GCP/legos/gcp_get_instances_without_label/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_get_instances_without_label/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_get_instances_without_label/gcp_get_instances_without_label.json
================================================
{
"action_title": "Get List of GCP compute instance without label",
"action_description": "Get List of GCP compute instance without label",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_get_instances_without_label",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_VM" ]
}
================================================
FILE: GCP/legos/gcp_get_instances_without_label/gcp_get_instances_without_label.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from google.cloud.compute_v1.services.instances import InstancesClient
class InputSchema(BaseModel):
    # Project and zone whose instances are scanned for missing labels.
    project: str = Field(
        title="GCP Project",
        description="GCP Project Name",
    )
    zone: str = Field(
        title="Zone",
        description="GCP Zone where instance list should be gotten from",
    )
def gcp_get_instances_without_label_printer(output):
    """Pretty-print instances lacking labels; no-op on an empty list."""
    if len(output) != 0:
        pprint.pprint(output)
def gcp_get_instances_without_label(handle, project: str, zone: str) -> List:
    """gcp_get_instances_without_label Returns the List of compute instances

    :type handle: object
    :param handle: GCP credentials object returned from Task Validate

    :type project: string
    :param project: Google Cloud Platform Project

    :type zone: string
    :param zone: Zone to which the instance list in the project should be fetched.

    :rtype: List of instances
    """
    output = []
    ic = InstancesClient(credentials=handle)
    # PERF FIX: instances.list already returns each instance's labels, so
    # inspect them directly instead of issuing a follow-up get() per
    # instance (the old code made N+1 API calls).
    for instance in ic.list(project=project, zone=zone):
        if not instance.labels:
            output.append(instance.name)
    return output
================================================
FILE: GCP/legos/gcp_get_unused_backend_services/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_get_unused_backend_services/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_get_unused_backend_services/gcp_get_unused_backend_services.json
================================================
{
"action_title": "Get unused GCP backend services",
"action_description": "Get unused backend service for an application load balancer that has no instances in its target group.\n",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_get_unused_backend_services",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" , "CATEGORY_TYPE_GCP", "CATEGORY_TYPE_GCP_VM"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: GCP/legos/gcp_get_unused_backend_services/gcp_get_unused_backend_services.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import List, Tuple
from google.cloud import compute_v1
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # GCP project whose backend services are checked.
    project: str = Field(
        ...,
        title='Project ID',
        description='GCP project ID',
    )
def gcp_get_unused_backend_services_printer(output):
    """Print the check result; no-op on None."""
    if output is not None:
        print(output)
def gcp_get_unused_backend_services(handle, project: str) -> Tuple:
    """
    gcp_get_unused_backend_services Returns a list of unused backend services
    and their target groups which have 0 instances in the given project.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type project: string
    :param project: Google Cloud Platform Project

    :return: Status, List of unused Backend services
    """
    # Fixed: the return annotation said List although the function returns a
    # (status, payload) tuple like the other check actions.
    backendClient = compute_v1.BackendServicesClient(credentials=handle)
    instanceClient = compute_v1.InstanceGroupsClient(credentials=handle)
    # List all backend services; skip services with no backends configured,
    # which previously raised IndexError on backends[0].
    backend_services = [
        {
            "backend_service_name": page.name,
            "backend_instance_group_name": page.backends[0].group.split('/')[-1]
        }
        for page in backendClient.list(project=project)
        if page.backends
    ]
    # Instance groups with zero instances.
    instance_groups = [
        instance.name for zone, response in instanceClient.aggregated_list(project=project)
        for instance in response.instance_groups if instance.size == 0
    ]
    # A backend service is "unused" when its instance group is one of the empty ones.
    result = [
        {
            "backend_service_name": ser["backend_service_name"],
            "instance_group_name": ser["backend_instance_group_name"]
        }
        for ser in backend_services if ser["backend_instance_group_name"] in instance_groups
    ]
    return (False, result) if result else (True, None)
================================================
FILE: GCP/legos/gcp_list_buckets/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_list_buckets/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_list_buckets/gcp_list_buckets.json
================================================
{
"action_title": "List all GCP Buckets",
"action_description": "List all GCP buckets",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_list_buckets",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["list"],
"action_nouns": ["buckets","gcp"],
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_BUCKET" ]
}
================================================
FILE: GCP/legos/gcp_list_buckets/gcp_list_buckets.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel
from google.cloud import storage
class InputSchema(BaseModel):
    # This action takes no inputs beyond the GCP credential handle.
    pass
def gcp_list_buckets_printer(output):
    """Pretty-print the bucket names, or a notice when there are none."""
    if not output:
        print("There are no buckets available")
        return
    pprint.pprint(output)
def gcp_list_buckets(handle) -> List:
    """gcp_list_buckets lists all GCP Buckets

    :type handle: object
    :param handle: GCP credentials object returned from Task Validate

    :rtype: List of all GCP buckets
    """
    storage_client = storage.Client(credentials=handle)
    # Collect just the names from the bucket-listing iterator.
    return [bucket.name for bucket in storage_client.list_buckets()]
================================================
FILE: GCP/legos/gcp_list_compute_instances/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_list_compute_instances/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_list_compute_instances/gcp_list_compute_instances.json
================================================
{
"action_title": "Get GCP compute instances",
"action_description": "Get GCP compute instances",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_list_compute_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_VM"]
}
================================================
FILE: GCP/legos/gcp_list_compute_instances/gcp_list_compute_instances.py
================================================
from typing import List, Optional
from pydantic import BaseModel, Field
from google.cloud.compute_v1.services.instances import InstancesClient
import re
class InputSchema(BaseModel):
    """Inputs surfaced in the unSkript UI for listing compute instances."""
    # GCP project whose compute instances are listed.
    project: str = Field(
        title = "GCP Project",
        description = "GCP Project Name"
    )
    # Optional zone filter; when omitted, instances from all zones are aggregated.
    zone: Optional[str] = Field(
        title = "Zone",
        description = "GCP Zone where instance list should be gotten from"
    )
def gcp_list_compute_instances_printer(output):
    """Print each discovered instance entry on its own line."""
    if len(output) > 0:
        for entry in output:
            print(entry)
def gcp_list_compute_instances(handle, project: str, zone: str = "") -> List:
    """gcp_list_compute_instances Returns the List of compute instances
       from given project and zone

        :type project: string
        :param project: Google Cloud Platform Project

        :type zone: string
        :param zone: Zone to which the instance list in the project should be fetched.
                     When empty, instances from all zones are aggregated.

        :rtype: List of dicts with instance_name and instance_zone
    """
    output = []
    instance_client = InstancesClient(credentials=handle)
    if zone:
        # Zone given: a plain per-zone listing is sufficient.
        for instance in instance_client.list(project=project, zone=zone):
            output.append({"instance_name": instance.name, "instance_zone": zone})
    else:
        # No zone: aggregate across all zones. The zone name is the trailing
        # path segment of the instance's zone URL. The previous regex
        # hard-coded the "unskript-dev" project in the URL, so the match
        # failed (and crashed on .group(1)) for every other project.
        agg_list = instance_client.aggregated_list(request={"project": project})
        for _, response in agg_list:
            if response.instances:
                for instance in response.instances:
                    zone_name = instance.zone.rstrip("/").rsplit("/", 1)[-1]
                    output.append({"instance_name": instance.name,
                                   "instance_zone": zone_name})
    return output
================================================
FILE: GCP/legos/gcp_list_compute_instances_by_label/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_list_compute_instances_by_label/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_list_compute_instances_by_label/gcp_list_compute_instances_by_label.json
================================================
{
"action_title": "Get List of GCP compute instance by label",
"action_description": "Get List of GCP compute instance by label",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_list_compute_instances_by_label",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_VM" ]
}
================================================
FILE: GCP/legos/gcp_list_compute_instances_by_label/gcp_list_compute_instances_by_label.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from google.cloud.compute_v1.services.instances import InstancesClient
class InputSchema(BaseModel):
    """Inputs for listing compute instances that carry a given label."""
    # GCP project whose instances are inspected.
    project: str = Field(
        title = "GCP Project",
        description = "GCP Project Name"
    )
    # Zone whose instances are inspected.
    zone: str = Field(
        title = "Zone",
        description = "GCP Zone where instance list should be gotten from"
    )
    # Label key to match on the instance.
    key: str = Field(
        title = "Label Key",
        description = "GCP label key assigned to instance"
    )
    # Label value to match on the instance.
    value: str = Field(
        title = "Label Value",
        description = "GCP label value assigned to instance"
    )
def gcp_list_compute_instances_by_label_printer(output):
    """Pretty-print the matching instance names, if any."""
    if len(output) > 0:
        pprint.pprint(output)
def gcp_list_compute_instances_by_label(
    handle,
    project: str,
    zone: str,
    key: str,
    value: str
) -> List:
    """gcp_list_compute_instances_by_label Returns the List of compute instances

        :type project: string
        :param project: Google Cloud Platform Project

        :type zone: string
        :param zone: Zone to which the instance list in the project should be fetched.

        :type key: string
        :param key: GCP label key assigned to instance.

        :type value: string
        :param value: GCP label value assigned to instance.

        :rtype: List of matching instance names
    """
    output = []
    ic = InstancesClient(credentials=handle)
    try:
        # list() already returns the labels, so the previous per-instance
        # get() round-trip is unnecessary.
        for instance in ic.list(project=project, zone=zone):
            labels = instance.labels
            # Bug fix: the old code checked the key against labels.keys() and
            # the value against labels.values() independently, so an instance
            # with labels {key: other, other_key: value} matched incorrectly.
            # Require that the given key maps to the given value.
            if key in labels and labels[key] == value:
                output.append(instance.name)
    except Exception as error:
        raise error
    return output
================================================
FILE: GCP/legos/gcp_list_compute_instances_by_vpc/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_list_compute_instances_by_vpc/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_list_compute_instances_by_vpc/gcp_list_compute_instances_by_vpc.json
================================================
{
"action_title": "Get list compute instance by VPC",
"action_description": "Get list compute instance by VPC",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_list_compute_instances_by_vpc",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_VM","CATEGORY_TYPE_GCP_VPC" ]
}
================================================
FILE: GCP/legos/gcp_list_compute_instances_by_vpc/gcp_list_compute_instances_by_vpc.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from google.cloud.compute_v1.services.instances import InstancesClient
class InputSchema(BaseModel):
    """Inputs for listing compute instances attached to a given VPC."""
    # GCP project whose instances are inspected.
    project: str = Field(
        title="GCP Project",
        description="GCP Project Name"
    )
    # Zone whose instances are inspected.
    zone: str = Field(
        title="Zone",
        description="GCP Zone where instance list should be gotten from"
    )
    # VPC network name matched against each NIC's network URL.
    vpc_id: str = Field(
        title="VPC Name",
        description="Name of the VPC."
    )
def gcp_list_compute_instances_by_vpc_printer(output):
    """Pretty-print the matching instance names, if any."""
    if len(output) > 0:
        pprint.pprint(output)
def gcp_list_compute_instances_by_vpc(
    handle,
    project: str,
    zone: str,
    vpc_id: str
) -> List:
    """gcp_list_instances_by_vpc Returns the List of compute instances

        :type project: string
        :param project: Google Cloud Platform Project

        :type zone: string
        :param zone: Zone to which the instance list in the project should be fetched.

        :type vpc_id: string
        :param vpc_id: Name of the VPC.

        :rtype: List of instance names attached to the VPC
    """
    result = []
    ic = InstancesClient(credentials=handle)
    names = [instance.name for instance in ic.list(project=project, zone=zone)]
    for name in names:
        details = ic.get(project=project, zone=zone, instance=name)
        # Bug fix: the old loop appended the instance once per matching NIC,
        # so a multi-NIC instance could appear several times in the result.
        # any() records each instance at most once.
        if any(vpc_id in nic.network for nic in details.network_interfaces):
            result.append(name)
    return result
================================================
FILE: GCP/legos/gcp_list_gke_cluster/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_list_gke_cluster/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_list_gke_cluster/gcp_list_gke_cluster.json
================================================
{
"action_title": "GCP List GKE Cluster",
"action_description": "GCP List GKE Cluster",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_list_gke_cluster",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_GKE" ]
}
================================================
FILE: GCP/legos/gcp_list_gke_cluster/gcp_list_gke_cluster.py
================================================
import pprint
from typing import List
from pydantic import BaseModel, Field
from google.cloud import container_v1
class InputSchema(BaseModel):
    """Inputs for listing GKE clusters in a project/zone."""
    # GCP project whose clusters are listed.
    project_id: str = Field(
        title = "GCP Project",
        description = "GCP Project Name"
    )
    # Zone (location) whose clusters are listed.
    zone: str = Field(
        title = "Zone",
        description = "GCP Zone where instance list should be gotten from"
    )
def gcp_list_gke_cluster_printer(output):
    """Pretty-print the cluster names, if any."""
    if len(output) > 0:
        pprint.pprint(output)
def gcp_list_gke_cluster(handle, project_id: str, zone: str) -> List:
    """gcp_list_gke_cluster Returns the list of cluster

        :type project_id: string
        :param project_id: Google Cloud Platform Project

        :type zone: string
        :param zone: Zone to which the cluster in the project should be fetched.

        :rtype: list of cluster names
    """
    # Create a client and list clusters under projects/<id>/locations/<zone>.
    client = container_v1.ClusterManagerClient(credentials=handle)
    cluster_list = []
    try:
        response = client.list_clusters(
            parent=f'projects/{project_id}/locations/{zone}')
        cluster_list = [cluster.name for cluster in response.clusters]
    except Exception as error:
        raise error
    return cluster_list
================================================
FILE: GCP/legos/gcp_list_nodes_in_gke_cluster/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_list_nodes_in_gke_cluster/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_list_nodes_in_gke_cluster/gcp_list_nodes_in_gke_cluster.json
================================================
{
"action_title": "GCP List Nodes in GKE Cluster",
"action_description": "GCP List Nodes of GKE Cluster",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_list_nodes_in_gke_cluster",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_GKE" ]
}
================================================
FILE: GCP/legos/gcp_list_nodes_in_gke_cluster/gcp_list_nodes_in_gke_cluster.py
================================================
import pprint
from typing import List
from pydantic import BaseModel, Field
from google.cloud import container_v1
class InputSchema(BaseModel):
    """Inputs for listing the node pools of a GKE cluster."""
    # GCP project that owns the cluster.
    project_id: str = Field(
        title = "GCP Project",
        description = "GCP Project Name"
    )
    # Zone where the cluster is located.
    zone: str = Field(
        title = "Zone",
        description = "GCP Zone where instance list should be gotten from"
    )
    # Target GKE cluster.
    cluster_name: str = Field(
        title = "Cluster Name",
        description = "Name of the GKE cluster."
    )
def gcp_list_nodes_in_gke_cluster_printer(output):
    """Pretty-print the node-pool names, if any."""
    if len(output) > 0:
        pprint.pprint(output)
def gcp_list_nodes_in_gke_cluster(handle, project_id: str, zone: str, cluster_name: str) -> List:
    """gcp_list_nodes_in_gke_cluster Returns the list of node pools of the cluster

    NOTE(review): despite the action name, the API call used here
    (list_node_pools) returns *node pool* names, not individual nodes.

        :type project_id: string
        :param project_id: Google Cloud Platform Project

        :type zone: string
        :param zone: Zone to which the cluster in the project should be fetched.

        :type cluster_name: string
        :param cluster_name: Name of the GKE cluster.

        :rtype: list of node-pool names
    """
    # Create a client
    client = container_v1.ClusterManagerClient(credentials=handle)
    node_list = []
    try:
        response = client.list_node_pools(project_id=project_id, zone=zone,
                                          cluster_id=cluster_name)
        node_list = [pool.name for pool in response.node_pools]
    except Exception as error:
        raise error
    return node_list
================================================
FILE: GCP/legos/gcp_list_public_buckets/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_list_public_buckets/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_list_public_buckets/gcp_list_public_buckets.json
================================================
{
"action_title": "List all Public GCP Buckets",
"action_description": "List all publicly available GCP buckets",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_list_public_buckets",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type":"ACTION_OUTPUT_TYPE_LIST",
"action_verbs": ["list"],
"action_nouns": ["public","buckets","gcp"],
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_BUCKET" ]
}
================================================
FILE: GCP/legos/gcp_list_public_buckets/gcp_list_public_buckets.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List
from pydantic import BaseModel
from google.cloud import storage
class InputSchema(BaseModel):
    """No user inputs: the action only needs the GCP credential handle."""
    pass
def gcp_list_public_buckets_printer(output):
    """Print the public bucket list, or a friendly note when it is empty."""
    if len(output) > 0:
        print(output)
    else:
        print("There are no publicly readable buckets available")
def gcp_list_public_buckets(handle) -> List:
    """gcp_list_public_buckets lists all public GCP Buckets

        :rtype: List of all public GCP bucket names
    """
    try:
        storage_client = storage.Client(credentials=handle)
        result = []
        # Principals that make a bucket publicly readable per GCP IAM docs.
        public_members = {'allUsers', 'allAuthenticatedUsers'}
        for bucket in storage_client.list_buckets():
            b = storage_client.bucket(str(bucket.name))
            policy = b.get_iam_policy(requested_policy_version=3)
            for binding in policy.bindings:
                # Bug fix: the old equality test binding['members'] == {'allUsers'}
                # only matched when allUsers was the *only* member of the
                # binding; membership is the correct check.
                if public_members & set(binding['members']):
                    result.append(bucket.name)
                    break  # avoid duplicates when several bindings are public
    except Exception as e:
        raise e
    return result
================================================
FILE: GCP/legos/gcp_list_secrets/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_list_secrets/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_list_secrets/gcp_list_secrets.json
================================================
{
"action_title": "List GCP Secrets",
"action_description": "List of your GCP Secrets",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_list_secrets",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type":"ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_SECRET" ]
}
================================================
FILE: GCP/legos/gcp_list_secrets/gcp_list_secrets.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from google.cloud import secretmanager
class InputSchema(BaseModel):
    """Inputs for listing Secret Manager secrets in a project."""
    # Project whose secrets are listed (used to build "projects/<name>").
    name: str = Field(
        title='Project Name',
        description='Name of the Google Cloud Project.')
def gcp_list_secrets_printer(output):
    """Pretty-print the secret names unless nothing was returned."""
    if output is not None:
        pprint.pprint(output)
def gcp_list_secrets(handle, name: str) -> List:
    """gcp_list_secrets List all the secrets for a given project.

        :type name: string
        :param name: Name of the Google Cloud Project.

        :rtype: List of the names of all the secrets.
    """
    client = secretmanager.SecretManagerServiceClient(credentials=handle)
    # The API expects the parent in "projects/<name>" form.
    try:
        resp = client.list_secrets(parent="projects/" + name)
    except Exception as e:
        raise e
    return [secret.name for secret in resp.secrets]
================================================
FILE: GCP/legos/gcp_list_service_accounts/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_list_service_accounts/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_list_service_accounts/gcp_list_service_accounts.json
================================================
{
"action_title": "GCP List Service Accounts",
"action_description": "GCP List Service Accounts",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_list_service_accounts",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_SECRET" ]
}
================================================
FILE: GCP/legos/gcp_list_service_accounts/gcp_list_service_accounts.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
import googleapiclient.discovery
class InputSchema(BaseModel):
    """Inputs for listing IAM service accounts in a project."""
    # Project whose service accounts are listed.
    project_id: str = Field(
        title = "Project ID",
        description = "Name of the project e.g unskript-dev"
    )
def gcp_list_service_accounts_printer(output):
    """Pretty-print the service-account names unless nothing was returned."""
    if output is not None:
        pprint.pprint(output)
def gcp_list_service_accounts(handle, project_id: str) -> List:
    """gcp_list_service_accounts Returns a list of service accounts

        :type project_id: string
        :param project_id: Name of the project

        :rtype: List of service account resource names
    """
    result = []
    service = googleapiclient.discovery.build(
        'iam', 'v1', credentials=handle)
    try:
        request = service.projects().serviceAccounts().list(
            name='projects/' + project_id)
        # Follow pagination; the old code only read the first page.
        while request is not None:
            response = request.execute()
            # Bug fix: "accounts" is absent from the response when the
            # project has no service accounts; the old code raised KeyError.
            for account in response.get("accounts", []):
                result.append(account["name"])
            request = service.projects().serviceAccounts().list_next(
                previous_request=request, previous_response=response)
    except Exception as error:
        raise error
    return result
================================================
FILE: GCP/legos/gcp_list_vms_access/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_list_vms_access/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_list_vms_access/gcp_list_vms_access.json
================================================
{
"action_title": "List all GCP VMs and if Publicly Accessible",
"action_description": "Lists all GCP VMs, and identifies those that are publicly accessible.",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_list_vms_access",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["list"],
"action_nouns": ["VMs","gcp"],
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_VMS"]
}
================================================
FILE: GCP/legos/gcp_list_vms_access/gcp_list_vms_access.py
================================================
## Copyright (c) 2023 unSkript, Inc
## Written by Doug Sillars & ChatGPT
## All rights reserved.
##
from typing import List
from pydantic import BaseModel, Field
from google.cloud import compute
from beartype import beartype
class InputSchema(BaseModel):
    """Inputs for listing VMs and their accessibility in a project/zone."""
    # Project whose VMs are listed.
    project: str = Field(
        title='Project Name',
        description='Name of the Google Cloud Project.')
    # Zone whose VMs are listed.
    zone: str = Field(
        title='Zone',
        description='Name of the Google Cloud Zone where the project is located.')
@beartype
def gcp_list_vms_access_printer(output):
    """Print the VM access list, or a friendly note when it is empty."""
    if len(output) == 0:
        # Fixed message: the old text was copy-pasted from a bucket lego
        # and claimed "no publicly readable buckets" in a VM action.
        print("There are no VMs in this project/zone")
        return
    print(output)
@beartype
def gcp_list_vms_access(handle, project: str, zone: str) -> List:
    """gcp_list_vms_access Lists VMs in the zone and whether each is
    publicly accessible.

        :type project: string
        :param project: Name of the Google Cloud Project.

        :type zone: string
        :param zone: Name of the Google Cloud Zone.

        :rtype: List of dicts with 'name' and 'publicly_accessible'
    """
    compute_client = compute.InstancesClient(credentials=handle)
    vm_list = []
    for vm in compute_client.list(project=project, zone=zone):
        # Bug fix: the old code reported can_ip_forward, which only says the
        # VM may forward packets, not that it is reachable from the internet.
        # A VM is publicly accessible when any NIC carries an access config
        # (an external NAT IP).
        is_public = any(nic.access_configs for nic in vm.network_interfaces)
        vm_list.append({'name': vm.name, 'publicly_accessible': is_public})
    return vm_list
================================================
FILE: GCP/legos/gcp_remove_member_from_iam_role/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_remove_member_from_iam_role/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_remove_member_from_iam_role/gcp_remove_member_from_iam_role.json
================================================
{
"action_title": "GCP Remove Member from IAM Role",
"action_description": "Remove member from the chosen IAM role.",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_remove_member_from_iam_role",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_IAM" ]
}
================================================
FILE: GCP/legos/gcp_remove_member_from_iam_role/gcp_remove_member_from_iam_role.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
import googleapiclient.discovery
class InputSchema(BaseModel):
    """Inputs for removing a member from a project-level IAM role."""
    # Project whose IAM policy is edited.
    project_id: str = Field(
        title = "Project ID",
        description = "Name of the project e.g unskript-dev"
    )
    # Role (without the "roles/" prefix) to remove the member from.
    role: str = Field(
        title = "Role Name",
        description = "Role name from which member needs to remove e.g iam.serviceAccountUser"
    )
    # Member email (user or service account) to remove.
    member_email: str = Field(
        title = "Member Email",
        description = "Member email which has GCP access e.g test@company.com"
    )
    # Consistency fix: the entry function defaults version to 1, but the
    # schema declared it required; give the field the same default.
    version: int = Field(
        default = 1,
        title = "Requested Policy Version",
        description = "Requested Policy Version"
    )
def gcp_remove_member_from_iam_role_printer(output):
    """Pretty-print the updated policy unless nothing was returned."""
    if output is not None:
        pprint.pprint(output)
def gcp_remove_member_from_iam_role(
    handle,
    project_id: str,
    role: str,
    member_email: str,
    version: int = 1
) -> Dict:
    """gcp_remove_member_from_iam_role Returns a Dict of new policy details

        :type project_id: string
        :param project_id: Name of the project

        :type role: string
        :param role: Role name from which member needs to remove e.g iam.serviceAccountUser

        :type member_email: string
        :param member_email: Member email which has GCP access e.g test@company.com

        :type version: int
        :param version: Requested Policy Version

        :rtype: Dict of new policy details (empty when nothing was removed)
    """
    service = googleapiclient.discovery.build(
        "cloudresourcemanager", "v1", credentials=handle)
    result = {}
    try:
        # Service accounts need the serviceAccount: prefix; users get user:.
        member = "user:" + member_email
        if "gserviceaccount" in member_email:
            member = "serviceAccount:" + member_email
        get_policy = (
            service.projects().getIamPolicy(
                resource=project_id,
                body={"options": {"requestedPolicyVersion": version}}).execute())
        get_role = "roles/" + role
        # Bug fix: a bare next() raised an opaque StopIteration when the
        # role had no binding in the policy; default to None instead.
        binding = next(
            (b for b in get_policy["bindings"] if b["role"] == get_role), None)
        if binding and "members" in binding and member in binding["members"]:
            binding["members"].remove(member)
            result = (
                service.projects()
                .setIamPolicy(resource=project_id,
                              body={"policy": get_policy}).execute())
    except Exception as error:
        raise error
    return result
================================================
FILE: GCP/legos/gcp_remove_role_from_service_account/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_remove_role_from_service_account/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_remove_role_from_service_account/gcp_remove_role_from_service_account.json
================================================
{
"action_title": "GCP Remove Role from Service Account",
"action_description": "Remove role and member from the service account",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_remove_role_from_service_account",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_SECOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_IAM" ]
}
================================================
FILE: GCP/legos/gcp_remove_role_from_service_account/gcp_remove_role_from_service_account.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from googleapiclient import discovery
class InputSchema(BaseModel):
    """Inputs for removing a role binding from a service account's policy."""
    # Project that owns the service account.
    project_id: str = Field(
        title = "Project ID",
        description = "Name of the project e.g unskript-dev"
    )
    # Role (without the "roles/" prefix) whose binding is removed.
    role: str = Field(
        title = "Role Name",
        description = "Role name from which member needs to remove e.g iam.serviceAccountUser"
    )
    # Email of the target service account.
    sa_id: str = Field(
        title = "Service Account Email",
        description = "Service Account email id e.g test-user@unskript-dev.iam.gserviceaccount.com"
    )
def gcp_remove_role_from_service_account_printer(output):
    """Pretty-print the updated policy unless nothing was returned."""
    if output is not None:
        pprint.pprint(output)
def gcp_remove_role_from_service_account(
    handle,
    project_id: str,
    role: str,
    sa_id: str
) -> Dict:
    """gcp_remove_role_from_service_account Returns a Dict of new policy details

        :type project_id: string
        :param project_id: Name of the project

        :type role: string
        :param role: Role name from which member needs to remove e.g iam.serviceAccountUser

        :type sa_id: string
        :param sa_id: Service Account email

        :rtype: Dict of new policy details (empty when the role was not bound)
    """
    service = discovery.build('iam', 'v1', credentials=handle)
    result = {}
    try:
        resource = f'projects/{project_id}/serviceAccounts/{sa_id}'
        request = service.projects().serviceAccounts().getIamPolicy(resource=resource)
        get_policy = request.execute()
        get_role = "roles/" + role
        # Bug fix: a bare next() raised an opaque StopIteration when the role
        # had no binding; default to None and return an empty dict instead.
        binding = next(
            (b for b in get_policy["bindings"] if b["role"] == get_role), None)
        if binding is not None:
            get_policy["bindings"].remove(binding)
            set_policy = service.projects().serviceAccounts().setIamPolicy(
                resource=resource,
                body={"policy": get_policy}
            )
            result = set_policy.execute()
    except Exception as error:
        raise error
    return result
================================================
FILE: GCP/legos/gcp_remove_user_role/README.md
================================================
[
================================================
FILE: GCP/legos/gcp_remove_user_role/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_remove_user_role/gcp_remove_user_role.json
================================================
{
"action_title": "Remove role from user",
"action_description": "GCP lego for removing a role from a user (default: 'viewer')",
"action_entry_function": "gcp_remove_user_role",
"action_type": "LEGO_TYPE_GCP",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_IAM" ]
}
================================================
FILE: GCP/legos/gcp_remove_user_role/gcp_remove_user_role.py
================================================
import pprint
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
role: str = Field(
title = "Role",
description = "GCP user role to be removed"
)
member: str = Field(
title = "Member",
description = "user's id to be removed"
)
resource: str = Field(
title = "Resource",
description = ('GCP Resource in the form of project/
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_resize_gke_cluster/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_resize_gke_cluster/gcp_resize_gke_cluster.json
================================================
{
"action_title": "GCP Resize a GKE cluster",
"action_description": "GCP Resize a GKE cluster by modifying nodes",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_resize_gke_cluster",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_GKE" ]
}
================================================
FILE: GCP/legos/gcp_resize_gke_cluster/gcp_resize_gke_cluster.py
================================================
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from google.cloud import container_v1
from google.protobuf.json_format import MessageToDict
class InputSchema(BaseModel):
    """Inputs for resizing a GKE node pool."""
    # GCP project that owns the cluster.
    project_id: str = Field(
        title = "GCP Project",
        description = "GCP Project Name"
    )
    # Zone where the cluster is located.
    zone: str = Field(
        title = "Zone",
        description = "GCP Zone where instance list should be gotten from"
    )
    # Target GKE cluster.
    cluster_name: str = Field(
        title = "Cluster Name",
        description = "Name of the GKE cluster."
    )
    # Node pool to resize.
    node_id: str = Field(
        title = "Node Name",
        description = "Name of the GKE cluster Node."
    )
    # Type fix: the entry function takes node_count as int, but the schema
    # declared it str; align the annotation.
    node_count: int = Field(
        title = "Initial Node Count",
        description = "Node count of GKE cluster."
    )
def gcp_resize_gke_cluster_printer(output):
    """Pretty-print the resize operation details, if any."""
    if len(output) > 0:
        pprint.pprint(output)
def gcp_resize_gke_cluster(
    handle,
    project_id: str,
    zone: str,
    cluster_name: str,
    node_id: str,
    node_count: int) -> Dict:
    """gcp_resize_gke_cluster Returns the dict of cluster details

        :type project_id: string
        :param project_id: Google Cloud Platform Project

        :type zone: string
        :param zone: Zone to which the cluster in the project should be fetched.

        :type cluster_name: string
        :param cluster_name: Name of the GKE cluster.

        :type node_id: string
        :param node_id: Name of the GKE cluster Node.

        :type node_count: int
        :param node_count: Node count of GKE cluster.

        :rtype: Dict of cluster details
    """
    # Create a client
    client = container_v1.ClusterManagerClient(credentials=handle)
    try:
        resize_request = container_v1.SetNodePoolSizeRequest(
            project_id=project_id,
            zone=zone,
            cluster_id=cluster_name,
            node_pool_id=node_id,
            node_count=node_count,
        )
        operation = client.set_node_pool_size(request=resize_request)
        # Convert the protobuf operation into a plain dict for the caller.
        response = MessageToDict(operation._pb)
    except Exception as error:
        raise error
    return response
================================================
FILE: GCP/legos/gcp_restart_compute_instances/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_restart_compute_instances/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_restart_compute_instances/gcp_restart_compute_instances.json
================================================
{
"action_title": "GCP Restart compute instance",
"action_description": "GCP Restart compute instance",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_restart_compute_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_VM" ]
}
================================================
FILE: GCP/legos/gcp_restart_compute_instances/gcp_restart_compute_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from google.cloud.compute_v1.services.instances import InstancesClient
class InputSchema(BaseModel):
    """Input parameters for the restart-compute-instance action."""
    # GCP project that owns the instance.
    project_name: str = Field(description="GCP Project Name", title="GCP Project")
    # Zone in which the instance lives.
    zone: str = Field(description="GCP Zone where instance list should be gotten from", title="Zone")
    # Compute Engine instance to restart.
    instance_name: str = Field(description="Name of the instance.", title="Instance Name")
def gcp_restart_compute_instances_printer(output):
    """Pretty-print the restart result; an empty output prints nothing."""
    if len(output) != 0:
        pprint.pprint(output)
def gcp_restart_compute_instances(
    handle,
    project_name: str,
    zone: str,
    instance_name: str
) -> Dict:
    """gcp_restart_compute_instances resets (restarts) a compute instance and
    returns a dict describing the reset operation.

    :type handle: object
    :param handle: GCP credentials object returned from task validation.

    :type project_name: string
    :param project_name: Google Cloud Platform Project.

    :type zone: string
    :param zone: GCP Zone in which the instance resides.

    :type instance_name: string
    :param instance_name: Name of the instance to restart.

    :rtype: Dict with the id, name and status of the reset operation
    """
    output = {}
    ic = InstancesClient(credentials=handle)
    # reset() issues the restart request; the returned operation object
    # carries an id/name/status describing the asynchronous request.
    result = ic.reset(project=project_name, zone=zone, instance=instance_name)
    output['id'] = result.id
    output['name'] = result.name
    output['status'] = result.status
    return output
================================================
FILE: GCP/legos/gcp_restore_disk_from_snapshot/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_restore_disk_from_snapshot/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_restore_disk_from_snapshot/gcp_restore_disk_from_snapshot.json
================================================
{
  "action_title": "Restore GCP disk from a snapshot",
"action_description": "Restore a GCP disk from a compute instance snapshot.",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_restore_disk_from_snapshot",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_VM"]
}
================================================
FILE: GCP/legos/gcp_restore_disk_from_snapshot/gcp_restore_disk_from_snapshot.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Dict, Optional
from pydantic import BaseModel, Field
from google.cloud.compute_v1.services.disks import DisksClient
from google.cloud.compute_v1.services.snapshots import SnapshotsClient
from google.cloud.compute_v1.types import Disk, Snapshot
class InputSchema(BaseModel):
    """Input parameters for restoring a GCP disk from a snapshot."""
    # Project that owns both the disk and the snapshot.
    project: str = Field(..., title='GCP Project', description='GCP Project Name')
    # Zone holding the disk and snapshot.
    zone: str = Field(..., title='Zone', description='GCP Zone where the disk and snapshot reside.')
    # Disk to (re)create from the snapshot.
    disk: str = Field(..., title='Disk name', description='The name of the disk to restore.')
    # Snapshot used as the source.
    snapshot_name: str = Field(..., title='Snapshot name', description='The name of the snapshot to restore from.')
def gcp_restore_disk_from_snapshot_printer(output):
    """Print the restoration confirmation; None prints nothing."""
    if output is not None:
        print(output)
def gcp_restore_disk_from_snapshot(handle, project: str, zone: str, disk: str, snapshot_name: str) -> str:
    """gcp_restore_disk_from_snapshot Returns the confirmation of disk restoration.

    :type handle: object
    :param handle: Object returned from Task Validate

    :type project: string
    :param project: Google Cloud Platform Project

    :type zone: string
    :param zone: GCP Zone where the disk and snapshot reside.

    :type disk: string
    :param disk: The name of the disk to restore.

    :type snapshot_name: string
    :param snapshot_name: The name of the snapshot to restore from.

    :rtype: String of disk restoration confirmation
    """
    snap_client = SnapshotsClient(credentials=handle)
    disk_client = DisksClient(credentials=handle)
    # Fetch the snapshot first so its self_link can seed the new disk.
    src_snapshot = snap_client.get(project=project, snapshot=snapshot_name)
    new_disk = Disk(name=disk, source_snapshot=src_snapshot.self_link)
    try:
        # Insert a fresh disk whose contents come from the snapshot.
        disk_client.insert(project=project, zone=zone, disk_resource=new_disk)
    except Exception as e:
        raise e
    return f"Disk {disk} restored from Snapshot {snapshot_name}."
================================================
FILE: GCP/legos/gcp_save_csv_to_google_sheets_v1/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_stop_compute_instances/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_stop_compute_instances/gcp_stop_compute_instances.json
================================================
{
"action_title": "GCP Stop compute instance",
"action_description": "GCP Stop compute instance",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_stop_compute_instances",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_VM" ]
}
================================================
FILE: GCP/legos/gcp_stop_compute_instances/gcp_stop_compute_instances.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from google.cloud.compute_v1.services.instances import InstancesClient
class InputSchema(BaseModel):
    """Input parameters for the stop-compute-instance action."""
    # GCP project that owns the instance.
    project_name: str = Field(description="GCP Project Name", title="GCP Project")
    # Zone in which the instance lives.
    zone: str = Field(description="GCP Zone where instance list should be gotten from", title="Zone")
    # Compute Engine instance to stop.
    instance_name: str = Field(description="Name of the instance.", title="Instance Name")
def gcp_stop_compute_instances_printer(output):
    """Pretty-print the stop result; an empty output prints nothing."""
    if len(output) != 0:
        pprint.pprint(output)
def gcp_stop_compute_instances(handle, project_name: str, zone:str, instance_name: str) -> Dict:
    """gcp_stop_compute_instances stops a compute instance and returns a
    dict describing the stop operation.

    :type handle: object
    :param handle: GCP credentials object returned from task validation.

    :type project_name: string
    :param project_name: Google Cloud Platform Project.

    :type zone: string
    :param zone: GCP Zone in which the instance resides.

    :type instance_name: string
    :param instance_name: Name of the instance to stop.

    :rtype: Dict with the id, name and status of the stop operation
    """
    output = {}
    ic = InstancesClient(credentials=handle)
    # stop() issues the shutdown request; the returned operation object
    # carries an id/name/status describing the asynchronous request.
    result = ic.stop(project=project_name, zone=zone, instance=instance_name)
    output['id'] = result.id
    output['name'] = result.name
    output['status'] = result.status
    return output
================================================
FILE: GCP/legos/gcp_upload_file_to_bucket/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: GCP/legos/gcp_upload_file_to_bucket/__init__.py
================================================
================================================
FILE: GCP/legos/gcp_upload_file_to_bucket/gcp_upload_file_to_bucket.json
================================================
{
"action_title": "Upload an Object to GCP Bucket",
"action_description": "Upload an Object/Blob in a GCP bucket",
"action_type": "LEGO_TYPE_GCP",
"action_entry_function": "gcp_upload_file_to_bucket",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["upload"],
"action_nouns": ["file","bucket","gcp"],
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GCP","CATEGORY_TYPE_GCP_BUCKET" ]
}
================================================
FILE: GCP/legos/gcp_upload_file_to_bucket/gcp_upload_file_to_bucket.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import Dict
from pydantic import BaseModel, Field
from google.cloud import storage
class InputSchema(BaseModel):
    """Input parameters for uploading an object/blob to a GCP bucket."""
    # Name under which the new object/blob is stored.
    blob_name: str = Field(description="Name of the object/blob to be created", title="Blob Name")
    # Destination bucket.
    bucket_name: str = Field(description="Name of the bucket to create object/blob", title="Bucket Name")
    # Payload written into the object/blob.
    data: str = Field(description="String of data to be added to the object/blob", title="Input Data")
def gcp_upload_file_to_bucket_printer(output):
    """Print a confirmation for the uploaded object; None prints nothing."""
    if output is not None:
        print(f"Created an object {output['blob_name']} in {output['bucket_name']} bucket")
def gcp_upload_file_to_bucket(handle, blob_name: str, bucket_name: str, data: str) -> Dict:
    """gcp_upload_file_to_bucket uploads a string of data as an object/blob
    in a GCP bucket and returns the blob details.

    :type handle: object
    :param handle: GCP credentials object returned from task validation.

    :type blob_name: string
    :param blob_name: Name of the object/blob to be created.

    :type bucket_name: string
    :param bucket_name: Name of the bucket to create the object/blob in.

    :type data: string
    :param data: String of data to be added to the object/blob.

    :rtype: Dict of blob details
    """
    result = {}
    try:
        storage_client = storage.Client(credentials=handle)
        bucket = storage_client.get_bucket(bucket_name)
        blob = bucket.blob(blob_name)
        # upload_from_string returns None — do not clobber the blob reference
        # with the call's result (the original code did).
        blob.upload_from_string(data)
        result["blob_name"] = blob_name
        result["bucket_name"] = bucket_name
    except Exception as e:
        raise e
    return result
================================================
FILE: Github/README.md
================================================
# Github Actions
* [Github Assign Issue](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_assign_issue/README.md): Assign a github issue to a user
* [Github Check if Pull Request is merged](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_check_if_pull_request_is_merged/README.md): Check if a Github Pull Request is merged
* [Github Close Pull Request](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_close_pull_request/README.md): Close pull request based on pull request number
* [Github Count Stars](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_count_stars/README.md): Get count of stars for a repository
* [Github Create Issue](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_create_issue/README.md): Create a new Github Issue for a repository
* [Github Create Team](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_create_team/README.md): Create a new Github Team
* [Github Delete Branch](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_delete_branch/README.md): Delete a github branch
* [Github Get Branch](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_get_branch/README.md): Get Github branch for a user in a repository
* [Get Github Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_get_handle/README.md): Get Github Handle
* [Github Get Issue](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_get_issue/README.md): Get Github Issue from a repository
* [Github Get Open Branches](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_get_open_branches/README.md): Get first 100 open branches for a given user in a given repo.
* [Github Get Pull Request](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_get_pull_request/README.md): Get Github Pull Request for a user in a repository
* [Github Get Team](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_get_team/README.md): Github Get Team
* [Github Get User](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_get_user/README.md): Get Github User details
* [Github Invite User to Organization](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_invite_user_to_org/README.md): Invite a Github User to an Organization
* [Github Comment on an Issue](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_issue_comment/README.md): Add a comment to the selected GitHub Issue
* [Github List Open Issues](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_list_open_issues/README.md): List open Issues in a Github Repository
* [Github List Organization Members](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_list_org_members/README.md): List Github Organization Members
* [Github List PR Commits](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_list_pull_request_commits/README.md): Github List all Pull Request Commits
* [Github List Pull Request Reviewers](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_list_pull_request_reviewers/README.md): List PR reviewers for a PR
* [Github List Pull Requests](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_list_pull_requests/README.md): List pull requests for a user in a repository
* [Github List Stale Issues](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_list_stale_issues/README.md): List Stale Github Issues that have crossed a certain age limit.
* [Github List Stale Pull Requests](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_list_stale_pull_requests/README.md): Check for any Pull requests over a certain age.
* [Github List Stargazers](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_list_stargazers/README.md): List of Github users that have starred (essentially bookmarked) a repository
* [Github List Team Members](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_list_team_members/README.md): List Github Team Members for a given Team
* [Github List Team Repositories](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_list_team_repos/README.md): Github List Team Repositories
* [Github List Teams in Organization](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_list_teams_in_org/README.md): List teams in an organization in GitHub
* [Github List Webhooks](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_list_webhooks/README.md): List webhooks for a repository
* [Github Merge Pull Request](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_merge_pull_request/README.md): Github Merge Pull Request
* [Github Remove Member from Organization](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Github/legos/github_remove_member_from_org/README.md): Remove a member from a Github Organization
================================================
FILE: Github/__init__.py
================================================
================================================
FILE: Github/legos/__init__.py
================================================
================================================
FILE: Github/legos/github_assign_issue/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_assign_issue/__init__.py
================================================
================================================
FILE: Github/legos/github_assign_issue/github_assign_issue.json
================================================
{
"action_title": "Github Assign Issue",
"action_description": "Assign a github issue to a user",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_assign_issue",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_ISSUE" ]
}
================================================
FILE: Github/legos/github_assign_issue/github_assign_issue.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from github import GithubException
class InputSchema(BaseModel):
    """Input parameters for the github_assign_issue action."""
    # GitHub username that owns the repository.
    owner: str = Field(
        ..., description='Username of the GitHub user. Eg: "johnwick"', title='Owner'
    )
    repository: str = Field(
        ...,
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
    issue_number: int = Field(
        ...,
        description='Issue Number. Eg: "367"',
        title='Issue Number',
    )
    # Fixed: the assignee is a username string, not an int — the field now
    # matches github_assign_issue's `assignee: str` parameter.
    assignee: str = Field(
        ...,
        description='Username of the assignee.',
        title='Assignee Username',
    )
def github_assign_issue_printer(output):
    """Pretty-print the assignment status; None prints nothing."""
    if output is not None:
        pprint.pprint(output)
def github_assign_issue(
    handle,
    owner:str,
    repository:str,
    issue_number:int,
    assignee:str
) -> str:
    """github_assign_issue assigns an issue to a user.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"

    :type issue_number: int
    :param issue_number: Issue number. Eg: 345

    :type assignee: string
    :param assignee: Username of the assignee.

    :rtype: Status of assigning an issue to a user
    """
    issue_no = int(issue_number)
    try:
        owner = handle.get_user(owner)
        repo_name = owner.login + '/' + repository
        repo = handle.get_repo(repo_name)
        issue = repo.get_issue(issue_no)
        # PyGithub's add_to_assignees returns None on success.
        result = issue.add_to_assignees(assignee)
    except GithubException as e:
        if e.status == 403:
            raise Exception("You need admin access") from e
        if e.status == 404:
            raise Exception("No such repository or user found") from e
        raise e.data
    if result is None:
        return f"Issue {issue_no} assigned to {assignee}"
    return f"Unable to assign Issue {issue_no} to {assignee}"
================================================
FILE: Github/legos/github_close_pull_request/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_close_pull_request/__init__.py
================================================
================================================
FILE: Github/legos/github_close_pull_request/github_close_pull_request.json
================================================
{
"action_title": "Github Close Pull Request",
"action_description": "Close pull request based on pull request number",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_close_pull_request",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_PR" ]
}
================================================
FILE: Github/legos/github_close_pull_request/github_close_pull_request.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from github import GithubException
class InputSchema(BaseModel):
    """Input parameters for the github_close_pull_request action."""
    # GitHub username that owns the repository.
    owner: str = Field(title='Owner', description='Username of the GitHub user. Eg: "johnwick"')
    # Repository containing the pull request.
    repository: str = Field(title='Repository', description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"')
    # Number of the pull request to close.
    pull_request_number: int = Field(title='Pull Request Number', description='Pull request number. Eg: 167')
def github_close_pull_request_printer(output):
    """Pretty-print the close-PR result; None prints nothing."""
    if output is not None:
        pprint.pprint(output)
def github_close_pull_request(handle, owner:str, repository:str, pull_request_number: int) -> str:
    """github_close_pull_request closes an open pull request and reports when it was closed.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"

    :type pull_request_number: int
    :param pull_request_number: Pull request number. Eg: 167

    :rtype: String of details of pull request closure
    """
    result = []
    pr_number = int(pull_request_number)
    try:
        gh_user = handle.get_user(owner)
        repo = handle.get_repo(gh_user.login + '/' + repository)
        pr = repo.get_pull(pr_number)
        try:
            # Only an open PR can be closed; anything else is already done.
            if pr.state != "open":
                return f"PR number {pr.number} is already closed"
            result = pr.edit(state='closed')
            if result is None:
                return f"PR {pr.number} was closed at: {pr.closed_at} "
        except GithubException as e:
            if e.status == 404:
                raise Exception(("You need admin access of an organization in case "
                    "the repository is a part of an organization")) from e
            raise e.data
    except GithubException as e:
        if e.status == 403:
            raise Exception("You need admin access") from e
        if e.status == 404:
            raise Exception("No such pull number or repository or user found") from e
        raise e.data
    except Exception as e:
        raise e
    return result
================================================
FILE: Github/legos/github_count_stars/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_count_stars/__init__.py
================================================
================================================
FILE: Github/legos/github_count_stars/github_count_stars.json
================================================
{
"action_title": "Github Count Stars",
"action_description": "Get count of stars for a repository",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_count_stars",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_INT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_REPO" ]
}
================================================
FILE: Github/legos/github_count_stars/github_count_stars.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from github import GithubException
class InputSchema(BaseModel):
    """Input parameters for the github_count_stars action."""
    # GitHub username that owns the repository.
    owner: str = Field(title='Owner', description='Username of the GitHub user. Eg: "johnwick"')
    # Repository whose stars are counted.
    repository: str = Field(title='Repository', description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"')
def github_count_stars_printer(output):
    """Pretty-print the star count; None prints nothing."""
    if output is not None:
        pprint.pprint(output)
def github_count_stars(handle, owner:str, repository:str) -> int:
    """github_count_stars counts the number of stars on a repository.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"

    :rtype: Count of number of stars on a repository
    """
    try:
        gh_user = handle.get_user(owner)
        repo = handle.get_repo(gh_user.login + '/' + repository)
        # PaginatedList.totalCount reads the total from the API response
        # instead of fetching every stargazer page (the original
        # len(list(...)) issued one request per page of stargazers).
        result = repo.get_stargazers().totalCount
    except GithubException as e:
        if e.status == 403:
            raise Exception("You need admin access") from e
        if e.status == 404:
            raise Exception("No such repository or user found") from e
        raise e.data
    except Exception as e:
        raise e
    return result
================================================
FILE: Github/legos/github_create_issue/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_create_issue/__init__.py
================================================
================================================
FILE: Github/legos/github_create_issue/github_create_issue.json
================================================
{
"action_title": "Github Create Issue",
"action_description": "Create a new Github Issue for a repository",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_create_issue",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_ISSUE" ]
}
================================================
FILE: Github/legos/github_create_issue/github_create_issue.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from github import GithubException
class InputSchema(BaseModel):
    """Input parameters for the github_create_issue action."""
    # GitHub username that owns the repository.
    owner: str = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
    # Fixed typo in the user-facing description ("Title if" -> "Title of").
    title: str = Field(
        description='Title of the Github Issue',
        title='Title of the Issue'
    )
    description: str = Field(
        description='Description of the Github Issue',
        title='Description of the Issue'
    )
    assignee: str = Field(
        description='Username of the Github User to assign this issue to ',
        title='Username of the Assignee'
    )
def github_create_issue_printer(output):
    """Pretty-print the created-issue details; None prints nothing."""
    if output is not None:
        pprint.pprint(output)
def github_create_issue(
    handle,
    owner:str,
    repository:str,
    title:str,
    description:str,
    assignee: str
) -> Dict:
    """github_create_issue returns details of the newly created issue.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"

    :type title: string
    :param title: Title of the Github Issue

    :type description: string
    :param description: Description of the Github Issue

    :type assignee: string
    :param assignee: Username of the Assignee

    :rtype: Dict of newly created issue
    """
    issue_details = {}
    try:
        gh_user = handle.get_user(owner)
        repo = handle.get_repo(gh_user.login + '/' + repository)
        res = repo.create_issue(title=title, body=description, assignee=assignee)
        issue_details["title"] = res.title
        issue_details["issue_number"] = res.number
        # Guard: the created issue can come back without an assignee (e.g. if
        # GitHub rejects the assignment) — the original crashed on .login here.
        issue_details["assignee"] = res.assignee.login if res.assignee else None
    except GithubException as e:
        if e.status == 403:
            raise Exception("You need admin access") from e
        if e.status == 404:
            raise Exception("No such repository or user found") from e
        raise e.data
    except Exception as e:
        raise e
    return issue_details
================================================
FILE: Github/legos/github_create_team/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_create_team/__init__.py
================================================
================================================
FILE: Github/legos/github_create_team/github_create_team.json
================================================
{
"action_title": "Github Create Team",
"action_description": "Create a new Github Team",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_create_team",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_TEAM"]
}
================================================
FILE: Github/legos/github_create_team/github_create_team.py
================================================
import pprint
from typing import Optional, List, Dict
from pydantic import BaseModel, Field
from unskript.enums.github_team_privacy_enums import GithubTeamPrivacy
from github import GithubException
class InputSchema(BaseModel):
    """Input parameters for the github_create_team action."""
    # Name of the team to create, e.g. "backend".
    team_name: str = Field(
        description='Team name. Eg:"backend"',
        title='Team Name'
    )
    # Optional free-form description; defaults to the empty string.
    description: Optional[str] = Field(
        '',
        description='Description of the new team.',
        title='Description'
    )
    # Team visibility; "secret" or "closed" (semantics in the description).
    # NOTE(review): no default is passed to Field here, unlike `description`
    # above — confirm the intended required/optional behavior under pydantic.
    privacy: Optional[GithubTeamPrivacy] = Field(
        description=('Privacy type to be given to the team. "secret" - only visible to '
        'organization owners and members of this team, "closed"- visible to '
        'all members of this organization. By default type "secret" will be '
        'considered. '),
        title='Privacy'
    )
    # Organization under which the team is created.
    organization_name: str = Field(
        description='Github Organization Name. Eg: "infosecorg"',
        title='Organization Name'
    )
    # Repositories to attach to the new team.
    repositories: List = Field(
        description='List of the GitHub repositories to add to the new team. Eg: ["repo1","repo2"]',
        title='repositories',
    )
def github_create_team_printer(output):
    """Pretty-print the created-team details; None prints nothing."""
    if output is not None:
        pprint.pprint(output)
def github_create_team(
    handle,
    organization_name:str,
    team_name:str,
    repositories:list,
    privacy:GithubTeamPrivacy=GithubTeamPrivacy.secret,
    description:str=""
) -> Dict:
    """github_create_team returns details of newly created team.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type organization_name: string
    :param organization_name: Organization name Eg: "infosec"

    :type team_name: string
    :param team_name: Team name. Eg: "backend"

    :type repositories: list
    :param repositories: List of the GitHub repositories to add to the new team.
        Eg: ["repo1","repo2"]

    :type privacy: Enum
    :param privacy: Privacy type to be given to the team. "secret" - only visible
        to organization owners and members of this team, "closed"- visible to all
        members of this organization. By default type "secret" will be considered.

    :type description: string
    :param description: Description of the new team.

    :rtype: Dict of details of newly created team
    """
    team_details = {}
    # Bug fix: the original only set privacy_settings when privacy was
    # empty, so a supplied privacy value was silently dropped and an empty
    # string was sent to the API. Honor the caller's choice instead.
    if privacy is None or len(privacy) == 0:
        privacy_settings = "secret"
    else:
        privacy_settings = privacy.value if hasattr(privacy, "value") else privacy
    organization = handle.get_organization(organization_name)
    # Resolve each repository name to the organization's repository object.
    repo_objects = [organization.get_repo(repo) for repo in repositories]
    try:
        result = organization.create_team(
            name=team_name,
            repo_names=repo_objects,
            privacy=privacy_settings,
            description=description
        )
        team_details["name"] = result.name
        team_details["id"] = result.id
    except GithubException as e:
        if e.status == 404:
            raise Exception("No such organization found") from e
        raise e.data
    except Exception as e:
        raise e
    return team_details
================================================
FILE: Github/legos/github_delete_branch/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_delete_branch/__init__.py
================================================
================================================
FILE: Github/legos/github_delete_branch/github_delete_branch.json
================================================
{
"action_title": "Github Delete Branch",
"action_description": "Delete a github branch",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_delete_branch",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_REPO" ]
}
================================================
FILE: Github/legos/github_delete_branch/github_delete_branch.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from github import GithubException
class InputSchema(BaseModel):
    """Input parameters for the Github Delete Branch action."""
    # Name of the branch to delete.
    branch_name: str = Field(
        description='Branch name. Eg:"dummy-branch-name"',
        title='Branch Name'
    )
    # GitHub username that owns the repository.
    owner: str = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    # Repository (without the owner prefix) containing the branch.
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
def github_delete_branch_printer(output):
    """Pretty-print the action output; do nothing when there is no result."""
    if output is not None:
        pprint.pprint(output)
def github_delete_branch(handle, owner: str, repository: str, branch_name: str) -> str:
    """github_delete_branch deletes the given branch and reports the result.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"

    :type branch_name: string
    :param branch_name: Branch Name Eg: "dummy-branch"

    :rtype: String describing whether the branch was deleted or not found
    """
    try:
        user = handle.get_user(login=owner)
        repo_name = user.login + "/" + repository
        repo = handle.get_repo(repo_name)
        if repo.full_name == repo_name:
            branch = repo.get_branch(branch_name)
            if branch.name == branch_name:
                # Branches are deleted through their underlying git ref.
                ref = repo.get_git_ref(f"heads/{branch_name}")
                ref.delete()
                return f"{branch_name} successfully deleted"
        # Repository or branch did not match. Report it as a plain string so the
        # declared -> str contract holds (the original returned a list here and
        # could also fall through to None).
        return f"{branch_name} not found"
    except GithubException as e:
        if e.status == 403:
            raise Exception("You need admin access") from e
        if e.status == 404:
            raise Exception("No such username or repository") from e
        # e.data is a payload (dict/str), not an exception; wrap it so the
        # raise is valid instead of failing with TypeError.
        raise Exception(e.data) from e
================================================
FILE: Github/legos/github_get_branch/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_get_branch/__init__.py
================================================
================================================
FILE: Github/legos/github_get_branch/github_get_branch.json
================================================
{
"action_title": "Github Get Branch",
"action_description": "Get Github branch for a user in a repository",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_get_branch",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_REPO" ]
}
================================================
FILE: Github/legos/github_get_branch/github_get_branch.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from github import GithubException
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for the Github Get Branch action."""
    # Name of the branch to look up.
    branch_name: str = Field(
        description='Branch name. Eg:"dummy-branch-name"',
        title='Branch Name'
    )
    # GitHub username that owns the repository.
    owner: str = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    # Repository (without the owner prefix) containing the branch.
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
def github_get_branch_printer(output):
    """Pretty-print the branch details; skip silently for empty output."""
    if output is not None:
        pprint.pprint(output)
def github_get_branch(handle, owner: str, repository: str, branch_name: str) -> Dict:
    """github_get_branch returns the head-commit details of a branch.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"

    :type branch_name: string
    :param branch_name: Branch Name Eg: "dummy-branch"

    :rtype: Dict with the branch name and its head commit SHA
    """
    branch_info = {}
    try:
        user = handle.get_user(login=owner)
        repo_name = user.login + "/" + repository
        repo = handle.get_repo(repo_name)
        if repo.full_name == repo_name:
            branch = repo.get_branch(branch_name)
            if branch.name == branch_name:
                branch_info["branch"] = branch.name
                branch_info["commit"] = branch.commit.sha
            else:
                # Keep the return type consistent with -> Dict (the original
                # returned a bare list here).
                branch_info["error"] = f"{branch_name} not found"
    except GithubException as e:
        if e.status == 403:
            raise Exception("You need admin access") from e
        if e.status == 404:
            raise Exception("No such repository or user found") from e
        # e.data is a payload, not an exception; wrap it so the raise is valid.
        raise Exception(e.data) from e
    return branch_info
================================================
FILE: Github/legos/github_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_get_handle/__init__.py
================================================
================================================
FILE: Github/legos/github_get_handle/github_get_handle.json
================================================
{
"action_title": "Get Github Handle",
"action_description": "Get Github Handle",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false
}
================================================
FILE: Github/legos/github_get_handle/github_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    """This action takes no input parameters."""
    pass
def github_get_handle(handle):
    """github_get_handle returns the github handle.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: Github handle.
    """
    # The validated credential object is handed straight back to the caller.
    return handle
================================================
FILE: Github/legos/github_get_issue/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_get_issue/__init__.py
================================================
================================================
FILE: Github/legos/github_get_issue/github_get_issue.json
================================================
{
"action_title": "Github Get Issue",
"action_description": "Get Github Issue from a repository",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_get_issue",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_ISSUE" ]
}
================================================
FILE: Github/legos/github_get_issue/github_get_issue.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from github import GithubException
class InputSchema(BaseModel):
    """Input parameters for the Github Get Issue action."""
    # GitHub username that owns the repository.
    owner: str = Field(
        ..., description='Username of the GitHub user. Eg: "johnwick"', title='Owner'
    )
    # Repository (without the owner prefix) containing the issue.
    repository: str = Field(
        ...,
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
    # Number of the issue to fetch.
    issue_number: int = Field(
        ...,
        description='Issue Number. Eg: "367"',
        title='Issue Number',
    )
def github_get_issue_printer(output):
    """Pretty-print the issue details; skip silently for empty output."""
    if output is not None:
        pprint.pprint(output)
def github_get_issue(handle, owner: str, repository: str, issue_number: int) -> Dict:
    """github_get_issue returns details of the issue

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"

    :type issue_number: int
    :param issue_number: Issue number. Eg: 345

    :rtype: Dict of issue details
    """
    issue_details = {}
    try:
        user = handle.get_user(owner)
        repo = handle.get_repo(user.login + '/' + repository)
        issue = repo.get_issue(int(issue_number))
        issue_details["title"] = issue.title
        issue_details["issue_number"] = issue.number
        # The original check was inverted: it dereferenced .login only when the
        # assignee was None, raising AttributeError for unassigned issues.
        issue_details["assignee"] = issue.assignee.login if issue.assignee is not None else None
        issue_details["body"] = issue.body
        issue_details["state"] = issue.state
        # Render the last-updated timestamp as DD-MM-YYYY.
        issue_details["updated_at"] = issue.updated_at.strftime("%d-%m-%Y")
    except GithubException as e:
        if e.status == 403:
            raise Exception("You need admin access") from e
        if e.status == 404:
            raise Exception("No such repository or user found") from e
        # e.data is a payload, not an exception; wrap it so the raise is valid.
        raise Exception(e.data) from e
    return issue_details
================================================
FILE: Github/legos/github_get_open_branches/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_get_open_branches/__init__.py
================================================
================================================
FILE: Github/legos/github_get_open_branches/github_get_open_branches.json
================================================
{
"action_title": "Github Get Open Branches",
"action_description": "Get first 100 open branches for a given user in a given repo.",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_get_open_branches",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_REPO" ]
}
================================================
FILE: Github/legos/github_get_open_branches/github_get_open_branches.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from github import GithubException
class InputSchema(BaseModel):
    """Input parameters for the Github Get Open Branches action."""
    # GitHub username that owns the repository.
    owner: str = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    # Repository (without the owner prefix) whose branches are listed.
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
def github_get_open_branches_printer(output):
    """Pretty-print the branch list; skip silently for empty output."""
    if output is not None:
        pprint.pprint(output)
def github_get_open_branches(handle, owner: str, repository: str) -> List:
    """github_get_open_branches returns 100 open github branches for a user.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"

    :rtype: List of branches for a user for a repository
    """
    result = []
    try:
        user = handle.get_user(login=owner)
        repos = user.get_repos()
        repo_name = owner + "/" + repository
        if len(list(repos)) == 0:
            return [f"{owner} does not have any repositories"]
        for repo in repos:
            if repo.full_name == repo_name:
                # Cap the listing at the first 100 branches.
                result = [branch.name for branch in repo.get_branches()[:100]]
                break  # full names are unique; no need to scan further
    except GithubException as e:
        if e.status == 403:
            raise Exception("You need admin access") from e
        if e.status == 404:
            raise Exception("No such pull number or repository or user found") from e
        # e.data is a payload, not an exception; wrap it so the raise is valid.
        raise Exception(e.data) from e
    return result
================================================
FILE: Github/legos/github_get_open_pull_requests/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_get_open_pull_requests/__init__.py
================================================
================================================
FILE: Github/legos/github_get_open_pull_requests/github_get_open_pull_requests.json
================================================
{
"action_title": "Github get open pull requests",
"action_description": "This action gets details of open pull requests",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_get_open_pull_requests",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_PR" ],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Github/legos/github_get_open_pull_requests/github_get_open_pull_requests.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
from tabulate import tabulate
class InputSchema(BaseModel):
    """Input parameters for the Github get open pull requests action."""
    # Optional; the action falls back to the authenticated user when empty.
    owner: Optional[str] = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    # Repository (without the owner prefix) whose open PRs are listed.
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
def github_get_open_pull_requests_printer(output_tuple):
    """Render unmerged-PR results as a grid table, or a notice when none exist."""
    if output_tuple is None or output_tuple[1] is None:
        return
    success, output = output_tuple
    if success:
        print("No unmerged pull requests found.")
        return
    headers = ["PR Number", "Title", "Changed Files", "Review Comments", "Commits"]
    rows = []
    for pr in output:
        rows.append([
            pr["pull_number"],
            pr["pull_title"],
            pr["pull_changed_files"],
            pr["pull_review_comments"],
            pr["pull_commits"],
        ])
    print(tabulate(rows, headers, tablefmt="grid"))
def github_get_open_pull_requests(handle, repository: str, owner: str = "") -> Tuple:
    """github_get_open_pull_requests returns status, list of open pull requests.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string (Optional)
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"

    :rtype: Status, List of details of pull requests if it is not merged
    """
    # Default to the authenticated user when no owner is supplied.
    if not owner:
        owner = handle.get_user().login
    repo = handle.get_repo(f"{owner}/{repository}")
    prs = repo.get_pulls()
    # Check if there are no open pull requests
    if prs.get_page(0) == []:
        print("No pull requests are open at the moment.")
        return (True, None)
    # The original wrapped this loop in `try/except Exception: raise e`,
    # which is a no-op; exceptions now propagate naturally.
    result = [
        {
            "pull_number": pr.number,
            "pull_title": pr.title,
            "pull_changed_files": pr.changed_files,
            "pull_review_comments": pr.review_comments,
            "pull_commits": pr.commits,
        }
        for pr in prs
        if not pr.is_merged()
    ]
    return (False, result) if result else (True, None)
================================================
FILE: Github/legos/github_get_pull_request/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_get_pull_request/__init__.py
================================================
================================================
FILE: Github/legos/github_get_pull_request/github_get_pull_request.json
================================================
{
"action_title": "Github Get Pull Request",
"action_description": "Get Github Pull Request for a user in a repository",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_get_pull_request",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_PR" ]
}
================================================
FILE: Github/legos/github_get_pull_request/github_get_pull_request.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the Github Get Pull Request action."""
    # GitHub username that owns the repository.
    owner: str = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    # Repository (without the owner prefix) containing the pull request.
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
    # Number of the pull request to fetch.
    pull_request_number: int = Field(
        description='Pull request number. Eg: 167',
        title='Pull Request Number'
    )
def github_get_pull_request_printer(output):
    """Pretty-print the pull-request details; skip silently for empty output."""
    if output is not None:
        pprint.pprint(output)
def github_get_pull_request(handle, owner: str, repository: str, pull_request_number: int) -> Dict:
    """github_get_pull_request returns details of pull requests for a user

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"

    :type pull_request_number: int
    :param pull_request_number: Pull request number. Eg: 167

    :rtype: Dict of details of pull request for a user
    """
    prs_dict = {}
    try:
        user = handle.get_user(owner)
        repo = handle.get_repo(user.login + '/' + repository)
        pr = repo.get_pull(int(pull_request_number))
        prs_dict["pull_number"] = pr.number
        prs_dict["pull_title"] = pr.title
        prs_dict["pull_changed_files"] = pr.changed_files
        prs_dict["pull_review_comments"] = pr.review_comments
        prs_dict["pull_commits"] = pr.commits
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such pull number or repository or user found") from e
        # e.data is a payload, not an exception; wrap it so the raise is valid.
        raise Exception(e.data) from e
    return prs_dict
================================================
FILE: Github/legos/github_get_team/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_get_team/__init__.py
================================================
================================================
FILE: Github/legos/github_get_team/github_get_team.json
================================================
{
"action_title": "Github Get Team",
"action_description": "Github Get Team",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_get_team",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_TEAM" ]
}
================================================
FILE: Github/legos/github_get_team/github_get_team.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the Github Get Team action."""
    # Organization the team belongs to.
    organization_name: str = Field(
        description='Github Organization Name',
        title='Organization Name'
    )
    # Team slug/name to look up inside the organization.
    team_name: str = Field(
        description='Team name in a GitHub Organization',
        title='Team name'
    )
def github_get_team_printer(output):
    """Pretty-print the team details; skip silently for empty output."""
    if output is not None:
        pprint.pprint(output)
def github_get_team(handle, organization_name: str, team_name: str) -> Dict:
    """github_get_team returns details of the team

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type organization_name: string
    :param organization_name: Organization name Eg: "infosec"

    :type team_name: string
    :param team_name: Team name. Eg: "backend"

    :rtype: Dict of the team's details
    """
    team_details = {}
    try:
        organization = handle.get_organization(organization_name)
        team = organization.get_team_by_slug(team_name)
        team_details["team_name"] = team.name
        team_details["team_id"] = team.id
        team_details["members_count"] = team.members_count
        team_details["repos_count"] = team.repos_count
        team_details["privacy"] = team.privacy
        team_details["permission"] = team.permission
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such organization or repository found") from e
        # e.data is a payload, not an exception; wrap it so the raise is valid.
        raise Exception(e.data) from e
    # Fix: the original populated team_details but returned an always-empty
    # `result` list, so callers never received the data.
    return team_details
================================================
FILE: Github/legos/github_get_user/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_get_user/__init__.py
================================================
================================================
FILE: Github/legos/github_get_user/github_get_user.json
================================================
{
"action_title": "Github Get User",
"action_description": "Get Github User details",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_get_user",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_USER" ]
}
================================================
FILE: Github/legos/github_get_user/github_get_user.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from github import GithubException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the Github Get User action."""
    # GitHub username whose profile details are fetched.
    owner: str = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
def github_get_user_printer(output):
    """Pretty-print the user details; skip silently for empty output."""
    if output is not None:
        pprint.pprint(output)
def github_get_user(handle, owner: str) -> Dict:
    """github_get_user returns details of a user

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :rtype: Dict of details of a user
    """
    # Initialize outside the try so the final return is always well-defined.
    user_details = {}
    try:
        user = handle.get_user(login=owner)
        user_details["name"] = user.login
        user_details["company"] = user.company
        user_details["email"] = user.email
        user_details["bio"] = user.bio
        user_details["followers"] = user.followers
        user_details["following"] = user.following
    except GithubException as e:
        if e.status == 404:
            raise UnknownObjectException("User not found") from e
        # e.data is a payload, not an exception; wrap it so the raise is valid.
        raise Exception(e.data) from e
    return user_details
================================================
FILE: Github/legos/github_invite_user_to_org/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_invite_user_to_org/__init__.py
================================================
================================================
FILE: Github/legos/github_invite_user_to_org/github_invite_user_to_org.json
================================================
{
"action_title": "Github Invite User to Organization",
"action_description": "Invite a Github User to an Organization",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_invite_user_to_org",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_USER","CATEGORY_TYPE_GITHUB_ORG" ]
}
================================================
FILE: Github/legos/github_invite_user_to_org/github_invite_user_to_org.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, List
from pydantic import BaseModel, Field
from unskript.enums.github_user_role_enums import GithubUserRole
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the Github Invite User to Organization action."""
    # Email address the invitation is sent to.
    email: str = Field(
        description=('Email address of the user to invite to the Github Organization. '
                     'Eg: user@gmail.com'),
        title='Email',
    )
    # Organization that the user is invited into.
    organization_name: str = Field(
        description='Github Organization Name',
        title='Organization Name'
    )
    # Optional role; the action falls back to direct_member when unset.
    role: Optional[GithubUserRole] = Field(
        '',
        description=('Role to assign to the new user. By default, direct_member role will '
                     'be assigned. Eg:"admin" or "direct_member" or "billing_manager". '),
        title='Role',
    )
    # Team names the invited user should be added to.
    list_of_teams: List = Field(
        description='List of teams to add the user to. Eg:["frontend-dev","backend-dev"]',
        title='List of Teams',
    )
def github_invite_user_to_org_printer(output):
    """Pretty-print the invite status; skip silently for empty output."""
    if output is not None:
        pprint.pprint(output)
def github_invite_user_to_org(
        handle,
        organization_name: str,
        email: str,
        list_of_teams: list,
        role: GithubUserRole = GithubUserRole.direct_member
) -> str:
    """github_invite_user_to_org returns status of the invite

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type organization_name: string
    :param organization_name: Organization name Eg: "infosec"

    :type list_of_teams: list
    :param list_of_teams: List of teams to add the user to. Eg:["frontend-dev","backend-dev"]

    :type email: str
    :param email: Email address of the user to invite to the Github Organization.
                  Eg: user@gmail.com

    :type role: GithubUserRole (Enum)
    :param role: Role to assign to the new user. By default, direct_member role will be
                 assigned. Eg:"admin" or "direct_member" or "billing_manager".

    :rtype: String, Status message for a the invite
    """
    teams_list = []
    organization = handle.get_organization(organization_name)
    if role is None:
        role = "direct_member"
    try:
        # Resolve team names to team objects; names not found in the
        # organization are silently skipped.
        for each_team in organization.get_teams():
            if each_team.name in list_of_teams:
                teams_list.append(each_team)
        result = organization.invite_user(email=email, role=role, teams=teams_list)
        # invite_user yields no payload on success.
        if result is None:
            return "Successfully sent invite"
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such organization found") from e
        # e.data is a payload, not an exception; wrap it so the raise is valid.
        raise Exception(e.data) from e
    return None
================================================
FILE: Github/legos/github_issue_comment/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_issue_comment/__init__.py
================================================
================================================
FILE: Github/legos/github_issue_comment/github_issue_comment.json
================================================
{
"action_title": "Github Comment on an Issue",
"action_description": "Add a comment to the selected GitHub Issue",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_issue_comment",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_ISSUE" ]
}
================================================
FILE: Github/legos/github_issue_comment/github_issue_comment.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for the Github Comment on an Issue action."""
    # GitHub username that owns the repository.
    owner: str = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    # Repository (without the owner prefix) containing the issue.
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
    # Text of the comment to post.
    issue_comment: str = Field(
        description='The Comment to add to the Issue',
        title='Issue Comment'
    )
    # Issue number, supplied as a string and converted to int by the action.
    issue_number: str = Field(
        description='Github Issue where Comment is to be added.',
        title='Issue Number'
    )
def github_issue_comment_printer(output):
    """Pretty-print the comment status; skip silently for empty output."""
    if output is not None:
        pprint.pprint(output)
def github_issue_comment(
        handle,
        owner: str,
        repository: str,
        issue_number: str,
        issue_comment: str
) -> str:
    """Add a comment to the given GitHub issue and return a confirmation.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository.

    :type issue_number: string
    :param issue_number: Issue number (string form) the comment is added to.

    :type issue_comment: string
    :param issue_comment: Comment text to post on the issue.

    :rtype: Confirmation string
    """
    number = int(issue_number)
    gh_user = handle.get_user(owner)
    target_repo = handle.get_repo(gh_user.login + '/' + repository)
    # Look up the issue by its number and attach the comment.
    target_repo.get_issue(number).create_comment(issue_comment)
    return "added comment"
================================================
FILE: Github/legos/github_list_open_issues/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_list_open_issues/__init__.py
================================================
================================================
FILE: Github/legos/github_list_open_issues/github_list_open_issues.json
================================================
{
"action_title": "Github List Open Issues",
"action_description": "List open Issues in a Github Repository",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_list_open_issues",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_ISSUE" ]
}
================================================
FILE: Github/legos/github_list_open_issues/github_list_open_issues.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the Github List Open Issues action."""
    # GitHub username that owns the repository.
    owner: str = Field(
        ..., description='Username of the GitHub user. Eg: "johnwick"', title='Owner'
    )
    # Repository (without the owner prefix) whose open issues are listed.
    repository: str = Field(
        ...,
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
def github_list_open_issues_printer(output):
    """Pretty-print the open-issue list; skip silently for empty output."""
    if output is not None:
        pprint.pprint(output)
def github_list_open_issues(handle, owner:str, repository:str) -> List:
    """github_list_open_issues returns details of open issues
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"
    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"
    :rtype: List of open issues (each a dict with title, issue_number, assignee login)
    """
    result = []
    try:
        user = handle.get_user(owner)
        repo = handle.get_repo(user.login + '/' + repository)
        for issue in repo.get_issues():
            if issue.state != 'open':
                continue
            # Bug fix: the original check was inverted — it dereferenced
            # issue.assignee.login only when the assignee was None (raising
            # AttributeError) and stored the raw user object otherwise.
            assignee = issue.assignee.login if issue.assignee is not None else None
            result.append({
                "title": issue.title,
                "issue_number": issue.number,
                "assignee": assignee,
            })
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such repository or user found") from e
        raise e.data
    except Exception as e:
        raise e
    return result
================================================
FILE: Github/legos/github_list_org_members/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_list_org_members/__init__.py
================================================
================================================
FILE: Github/legos/github_list_org_members/github_list_org_members.json
================================================
{
"action_title": "Github List Organization Members",
"action_description": "List Github Organization Members",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_list_org_members",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_ORG" ]
}
================================================
FILE: Github/legos/github_list_org_members/github_list_org_members.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the github_list_org_members action."""
    # Organization whose membership should be listed.
    organization_name: str = Field(
        description='Name of Github Organization. Eg: "unskript"',
        title='Organization Name',
    )
def github_list_org_members_printer(output):
    """Pretty-print the member login list; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def github_list_org_members(handle, organization_name:str)-> List:
    """github_list_org_members returns the login of every member of a Github organization.

    (The original docstring was copy-pasted from github_remove_member_from_org
    and described the wrong contract.)
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type organization_name: string
    :param organization_name: Name of Github Organization. Eg: "unskript"
    :rtype: List of member logins of the organization
    """
    result = []
    try:
        organization = handle.get_organization(organization_name)
        # Collect the login handle of each organization member.
        result = [member.login for member in organization.get_members()]
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such organization or user found") from e
        raise e.data
    except Exception as e:
        raise e
    return result
================================================
FILE: Github/legos/github_list_pull_request_commits/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_list_pull_request_commits/__init__.py
================================================
================================================
FILE: Github/legos/github_list_pull_request_commits/github_list_pull_request_commits.json
================================================
{
"action_title": "Github List PR Commits",
"action_description": "Github List all Pull Request Commits",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_list_pull_request_commits",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_PR" ]
}
================================================
FILE: Github/legos/github_list_pull_request_commits/github_list_pull_request_commits.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the github_list_pull_request_commits action."""
    # Login of the account that owns the repository.
    owner: str = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    # Repository name without the owner prefix.
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
    # Number of the pull request whose commits should be listed.
    pull_request_number: int = Field(
        description='Pull request number. Eg: 167',
        title='Pull Request Number'
    )
def github_list_pull_request_commits_printer(output):
    """Pretty-print pull request commit details; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def github_list_pull_request_commits(
        handle,
        owner:str,
        repository:str,
        pull_request_number: int
        ) -> List:
    """github_list_pull_request_commits returns details of pull requests commits
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"
    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"
    :type pull_request_number: int
    :param pull_request_number: Pull request number. Eg: 167
    :rtype: List of details of pull request commits
    """
    result = []
    pr_number = int(pull_request_number)
    try:
        user = handle.get_user(owner)
        repo = handle.get_repo(user.login + '/' + repository)
        pr = repo.get_pull(pr_number)
        for commit in pr.get_commits():
            result.append({
                "sha": commit.sha,
                # Bug fix: commit.committer is None for commits whose committer
                # email is not linked to a GitHub account; the original
                # dereferenced .login unconditionally and crashed.
                "committer": commit.committer.login if commit.committer is not None else None,
                "date": commit.commit.author.date,
            })
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such pull number or repository or user found") from e
        raise e.data
    except Exception as e:
        raise e
    return result
================================================
FILE: Github/legos/github_list_pull_request_reviewers/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_list_pull_request_reviewers/__init__.py
================================================
================================================
FILE: Github/legos/github_list_pull_request_reviewers/github_list_pull_request_reviewers.json
================================================
{
"action_title": "Github List Pull Request Reviewers",
"action_description": "List PR reviewers for a PR",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_list_pull_request_reviewers",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_PR" ]
}
================================================
FILE: Github/legos/github_list_pull_request_reviewers/github_list_pull_request_reviewers.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the github_list_pull_request_reviewers action."""
    # Login of the account that owns the repository.
    owner: str = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    # Repository name without the owner prefix.
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
    # Number of the pull request whose requested reviewers should be listed.
    pull_request_number: int = Field(
        description='Pull request number. Eg: 167',
        title='Pull Request Number'
    )
def github_get_pull_request_reviewers_printer(output):
    """Pretty-print the reviewer list; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def github_list_pull_request_reviewers(
        handle,
        owner:str,
        repository:str,
        pull_request_number: int
        ) -> List:
    """github_get_pull_request_reviewers returns reviewers of a pull request
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"
    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"
    :type pull_request_number: int
    :param pull_request_number: Pull request number. Eg: 167
    :rtype: List of reviewers of a pull request
    """
    reviewers = []
    pr_number = int(pull_request_number)
    try:
        user = handle.get_user(owner)
        repo = handle.get_repo(user.login + '/' + repository)
        pr = repo.get_pull(pr_number)
        # get_review_requests() yields two groups (requested users and teams);
        # flatten both into a single list of logins.
        for group in pr.get_review_requests():
            reviewers.extend(entry.login for entry in group)
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such pull number or repository or user found") from e
        raise e.data
    except Exception as e:
        raise e
    if len(reviewers) == 0:
        return [f"No reviewers added for Pull Number {pr.number}"]
    return reviewers
================================================
FILE: Github/legos/github_list_pull_requests/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_list_pull_requests/__init__.py
================================================
================================================
FILE: Github/legos/github_list_pull_requests/github_list_pull_requests.json
================================================
{
"action_title": "Github List Pull Requests",
"action_description": "List pull requests for a user in a repository",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_list_pull_requests",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_PR" ]
}
================================================
FILE: Github/legos/github_list_pull_requests/github_list_pull_requests.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the github_list_pull_requests action."""
    # Login of the account that owns the repository.
    owner: str = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    # Repository name without the owner prefix.
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
def github_list_pull_requests_printer(output):
    """Pretty-print the pull request list; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def github_list_pull_requests(handle, owner:str, repository:str) -> List:
    """github_list_pull_requests returns all pull requests for a user
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"
    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"
    :rtype: List of pull requests for a user
    """
    pr_list = []
    try:
        user = handle.get_user(owner)
        repo = handle.get_repo(user.login + '/' + repository)
        # Fetch open PRs sorted by creation time; one {number: title} dict per PR.
        pr_list = [{pr.number: pr.title}
                   for pr in repo.get_pulls(state='open', sort='created')]
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such repository or user found") from e
        raise e.data
    except Exception as e:
        raise e
    return pr_list
================================================
FILE: Github/legos/github_list_stale_issues/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_list_stale_issues/__init__.py
================================================
================================================
FILE: Github/legos/github_list_stale_issues/github_list_stale_issues.json
================================================
{
"action_title": "Github List Stale Issues",
"action_description": "List Stale Github Issues that have crossed a certain age limit.",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_list_stale_issues",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_ISSUE" ],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Github/legos/github_list_stale_issues/github_list_stale_issues.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Tuple, Optional
import datetime
from pydantic import BaseModel, Field
from tabulate import tabulate
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the github_list_stale_issues check."""
    # Optional; when omitted the action falls back to the authenticated user.
    owner: Optional[str] = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
    # Bug fix: the default was the string '14' on an Optional[int] field;
    # pydantic does not validate defaults, so the raw string leaked through.
    age_to_stale: Optional[int] = Field(
        14,
        description=('Age in days to check if the issue creation or updation dates are '
                     'older and hence classify those issues as stale Eg:45'),
        title='Age to Stale',
    )
def github_list_stale_issues_printer(output):
    """Render the stale-issue check result as a grid table, or a friendly message."""
    if output is None or output[1] is None:
        return
    success, res = output
    if success:
        print("No stale issues found.")
        return
    rows = [[item["title"], item["issue_number"], item["assignee"]] for item in res]
    print(tabulate(rows, ["Title", "Issue Number", "Assignee"], tablefmt="grid"))
def github_list_stale_issues(handle, repository:str, age_to_stale:int=14, owner:str="") -> Tuple:
    """github_list_stale_issues returns details of stale issues
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"
    :type age_to_stale: int (Optional)
    :param age_to_stale: Age in days to check if the issue creation or updation dates
    are older and hence classify those issues as stale Eg:45
    :type owner: string (Optional)
    :param owner: Username of the GitHub user. Eg: "johnwick". Defaults to the
    authenticated user when empty. (The InputSchema and docstring already
    declared this parameter but the original function ignored it.)
    :rtype: Tuple of (True, None) when no stale issues, else (False, list of stale issues)
    """
    result = []
    try:
        # Honor an explicitly supplied owner; fall back to the authenticated user.
        repo_owner = owner if owner else handle.get_user().login
        repo = handle.get_repo(f"{repo_owner}/{repository}")
        issues = repo.get_issues()
        # Check if there are no open issues
        if issues.get_page(0) == []:
            print("No issues are open at the moment.")
            return (True, None)
        today = datetime.datetime.now()
        for issue in issues:
            days_since_update = (today - issue.updated_at).days
            days_since_creation = (today - issue.created_at).days
            if days_since_creation >= age_to_stale or days_since_update >= age_to_stale:
                result.append({
                    "title": issue.title,
                    "issue_number": issue.number,
                    "assignee": "None" if issue.assignee is None else issue.assignee.login
                })
    except Exception as e:
        raise e
    return (False, result) if result else (True, None)
================================================
FILE: Github/legos/github_list_stale_pull_requests/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_list_stale_pull_requests/__init__.py
================================================
================================================
FILE: Github/legos/github_list_stale_pull_requests/github_list_stale_pull_requests.json
================================================
{
"action_title": "Github List Stale Pull Requests",
"action_description": "Check for any Pull requests over a certain age. ",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_list_stale_pull_requests",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_PR" ],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Github/legos/github_list_stale_pull_requests/github_list_stale_pull_requests.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Tuple, Optional
import datetime
from pydantic import BaseModel, Field
from tabulate import tabulate
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the github_list_stale_pull_requests check."""
    # Optional; when omitted the action falls back to the authenticated user.
    owner: Optional[str] = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository'
    )
    # NOTE(review): no default is declared here although the entry function
    # defaults threshold to 14 — presumably the UI supplies a value; confirm.
    threshold: Optional[int] = Field(
        description=("Threshold number of days to check for a stale PR. Eg: 45 -> "
                     "All PR's older than 45 days will be displayed"),
        title='Threshold (in Days)'
    )
def github_list_stale_pull_requests_printer(output_tuple):
    """Render the stale-PR check result as a grid table, or a friendly message."""
    if output_tuple is None or output_tuple[1] is None:
        return
    success, output = output_tuple
    if success:
        print("No stale pull requests found.")
        return
    rows = [[entry["number"], entry["title"]] for entry in output]
    print(tabulate(rows, ["PR Number", "Title"], tablefmt="grid"))
def github_list_stale_pull_requests(handle, repository: str, threshold: int = 14, owner:str = "") -> Tuple:
    """github_list_stale_pull_requests returns stale pull requests
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"
    :type threshold: int (Optional)
    :param threshold: Threshold number of days to find stale PR's
    :type owner: string (Optional)
    :param owner: Username of the GitHub user. Eg: "johnwick". Defaults to the
    authenticated user when empty.
    :rtype: Status, List of stale pull requests
    """
    # Bug fixes vs. original:
    #  - `not owner` removed from validation: owner defaults to "" and the body
    #    deliberately falls back to the authenticated user, so the original
    #    check rejected its own default.
    #  - a duplicated `owner = handle.get_user().login` line unconditionally
    #    overwrote any caller-supplied owner.
    #  - a leftover debug `print(pr)` inside the loop was removed.
    if handle is None or not repository or threshold is None or threshold <= 0:
        raise ValueError("Invalid input parameters")
    result = []
    try:
        if not owner:
            owner = handle.get_user().login
        repo = handle.get_repo(f"{owner}/{repository}")
        prs = repo.get_pulls()
        # Check if there are no open pull requests
        if prs.get_page(0) == []:
            print("No pull requests are open at the moment.")
            return (True, None)
        today = datetime.datetime.now()
        for pr in prs:
            if (today - pr.created_at).days >= threshold:
                result.append({"number": pr.number, "title": pr.title})
    except Exception as e:
        raise e
    return (False, result) if result else (True, None)
================================================
FILE: Github/legos/github_list_stargazers/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_list_stargazers/__init__.py
================================================
================================================
FILE: Github/legos/github_list_stargazers/github_list_stargazers.json
================================================
{
"action_title": "Github List Stargazers",
"action_description": "List of Github users that have starred (essentially bookmarked) a repository",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_list_stargazers",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_REPO" ]
}
================================================
FILE: Github/legos/github_list_stargazers/github_list_stargazers.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the github_list_stargazers action."""
    # Login of the account that owns the repository (required).
    owner: str = Field(
        ..., description='Username of the GitHub user. Eg: "johnwick"', title='Owner'
    )
    # Repository name without the owner prefix (required).
    repository: str = Field(
        ...,
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
def github_list_stargazers_printer(output):
    """Pretty-print the stargazer list; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def github_list_stargazers(handle, owner:str, repository:str) -> List:
    """github_list_stargazers returns last 100 stargazers for a Github Repository
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"
    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"
    :rtype: List of last 100 stargazers for a Github Repository
    """
    result = []
    try:
        user = handle.get_user(owner)
        repo = handle.get_repo(user.login + '/' + repository)
        # Materialize the paginated list once and slice the last 100 entries.
        # The original `stars[len(list(stars))-100:]` fetched the list twice and
        # produced a negative start index on the PaginatedList for repos with
        # fewer than 100 stargazers.
        stars = list(repo.get_stargazers_with_dates())
        for star in stars[-100:]:
            result.append({
                "name": star.user.login,
                "date": star.starred_at.strftime("%d-%m-%Y"),
            })
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such repository or user found") from e
        raise e.data
    except Exception as e:
        raise e
    return result
================================================
FILE: Github/legos/github_list_team_members/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_list_team_members/__init__.py
================================================
================================================
FILE: Github/legos/github_list_team_members/github_list_team_members.json
================================================
{
"action_title": "Github List Team Members",
"action_description": "List Github Team Members for a given Team",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_list_team_members",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_TEAM", "CATEGORY_TYPE_GITHUB_USER" ]
}
================================================
FILE: Github/legos/github_list_team_members/github_list_team_members.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the github_list_team_members action."""
    # Organization that owns the team (required).
    organization_name: str = Field(
        ..., description='Github Organization Name', title='Organization Name'
    )
    # Team slug/name within the organization (required).
    team_name: str = Field(
        ..., description='Team name in a GitHub Organization', title='Team name'
    )
def github_list_team_members_printer(output):
    """Pretty-print the team member list; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def github_list_team_members(handle, organization_name:str, team_name:str) -> List:
    """github_list_team_members returns details of the team members for a given team
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type organization_name: string
    :param organization_name: Organization name Eg: "infosec"
    :type team_name: string
    :param team_name: Team name. Eg: "backend"
    :rtype: List of a teams members details
    """
    members_info = []
    try:
        org = handle.get_organization(organization_name)
        team = org.get_team_by_slug(team_name)
        # One {"name", "id"} dict per team member.
        members_info = [{"name": member.login, "id": member.id}
                        for member in team.get_members()]
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such organization or team found") from e
        raise e.data
    except Exception as e:
        raise e
    return members_info
================================================
FILE: Github/legos/github_list_team_repos/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_list_team_repos/__init__.py
================================================
================================================
FILE: Github/legos/github_list_team_repos/github_list_team_repos.json
================================================
{
"action_title": "Github List Team Repositories",
"action_description": "Github List Team Repositories",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_list_team_repos",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_REPO" ]
}
================================================
FILE: Github/legos/github_list_team_repos/github_list_team_repos.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    """Input parameters for the github_list_team_repos action."""
    # Organization that owns the team.
    organization_name: str = Field(
        description='Name of the GitHub Organization. Eg: "wheelorg"',
        title='Organization Name',
    )
    # Team slug/name within the organization.
    team_name: str = Field(
        description='Team name. Eg: "backend"',
        title='Team Name'
    )
def github_list_team_repos_printer(output):
    """Pretty-print the team repository list; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def github_list_team_repos(handle, organization_name:str, team_name:str) -> List:
    """github_list_team_repos returns list of repositories in a team
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type organization_name: string
    :param organization_name: Organization name Eg: "infosec"
    :type team_name: string
    :param team_name: Team name. Eg: "backend"
    :rtype: List of repositories in a team
    """
    repo_names = []
    try:
        org = handle.get_organization(organization_name)
        team = org.get_team_by_slug(team_name)
        # Collect the "owner/name" full name of each repository the team can access.
        for repo in team.get_repos():
            repo_names.append(repo.full_name)
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such organization or repository found") from e
        raise e.data
    except Exception as e:
        raise e
    return repo_names
================================================
FILE: Github/legos/github_list_teams_in_org/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_list_teams_in_org/__init__.py
================================================
================================================
FILE: Github/legos/github_list_teams_in_org/github_list_teams_in_org.json
================================================
{
"action_title": "Github List Teams in Organization",
"action_description": "List teams in a organization in GitHub",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_list_teams_in_org",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_TEAM","CATEGORY_TYPE_GITHUB_ORG" ]
}
================================================
FILE: Github/legos/github_list_teams_in_org/github_list_teams_in_org.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException
class InputSchema(BaseModel):
    """Input parameters for the github_list_teams_in_org action."""
    # Organization whose teams should be listed.
    organization_name: str = Field(
        description='Name of the GitHub Organization. Eg: "wheelorg"',
        title='Organization Name',
    )
def github_list_teams_in_org_printer(output):
    """Pretty-print the team name list; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def github_list_teams_in_org(handle, organization_name:str) -> List:
    """github_list_teams_in_org returns the names of the teams in a Github organization.

    (The original docstring incorrectly said "returns 100 open github branches
    for a user".)
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type organization_name: string
    :param organization_name: Name of the GitHub Organization. Eg: "wheelorg"
    :rtype: List of teams in a github org
    """
    result = []
    try:
        # Bug fix: get_organization/get_teams were called outside the try block,
        # so GithubException from them bypassed the handler below.
        organization = handle.get_organization(organization_name)
        for team in organization.get_teams():
            result.append(team.name)
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        raise e.data
    except Exception as e:
        raise e
    return result
================================================
FILE: Github/legos/github_list_webhooks/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_list_webhooks/__init__.py
================================================
================================================
FILE: Github/legos/github_list_webhooks/github_list_webhooks.json
================================================
{
"action_title": "Github List Webhooks",
"action_description": "List webhooks for a repository",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_list_webhooks",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB" ]
}
================================================
FILE: Github/legos/github_list_webhooks/github_list_webhooks.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    # Username of the account that owns the repository whose webhooks are listed.
    owner: str = Field(
        ..., description='Username of the GitHub user. Eg: "johnwick"', title='Owner'
    )
    # Repository name only (without the "owner/" prefix).
    repository: str = Field(
        ...,
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
def github_list_webhooks_printer(output):
    """Pretty-print webhook details; a falsy output (None or empty) prints nothing."""
    if output:
        pprint.pprint(output)
def github_list_webhooks(handle, owner:str, repository: str) -> List:
    """github_list_webhooks returns details of webhooks for a repository

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"

    :rtype: List of details of webhooks for a repository
    """
    hook_details = []
    try:
        user = handle.get_user(owner)
        # Repositories are addressed as "<login>/<repository>".
        repo = handle.get_repo(user.login + '/' + repository)
        for hook in repo.get_hooks():
            hook_details.append({
                'url': hook.url,
                'id': hook.id,
                'active': hook.active,
                'events': hook.events,
                'config': hook.config,
            })
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such repository or user found") from e
        raise e.data
    except Exception as e:
        raise e
    return hook_details
================================================
FILE: Github/legos/github_merge_pull_request/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_merge_pull_request/__init__.py
================================================
================================================
FILE: Github/legos/github_merge_pull_request/github_merge_pull_request.json
================================================
{
"action_title": "Github Merge Pull Request",
"action_description": "Github Merge Pull Request",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_merge_pull_request",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_PR" ]
}
================================================
FILE: Github/legos/github_merge_pull_request/github_merge_pull_request.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    # Username of the account that owns the repository.
    owner: str = Field(
        description='Username of the GitHub user. Eg: "johnwick"',
        title='Owner'
    )
    # Repository name only (without the "owner/" prefix).
    repository: str = Field(
        description='Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"',
        title='Repository',
    )
    # Number of the pull request whose head will be merged.
    pull_request_number: int = Field(
        description='Pull request number. Eg: 167',
        title='Pull Request Number'
    )
    # Message used for the merge commit.
    commit_message: str = Field(
        description='Merge commit message.',
        title='Commit Message'
    )
def github_merge_pull_request_printer(output):
    """Pretty-print the merge result message; None output is skipped."""
    if output is not None:
        pprint.pprint(output)
def github_merge_pull_request(
    handle,
    owner:str,
    repository:str,
    pull_request_number: int,
    commit_message:str
    ) -> str:
    """github_merge_pull_request merges a pull request's head into "master" and
    returns a message with the merge commit SHA.

    Note- The base branch is considered to be "master"

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type owner: string
    :param owner: Username of the GitHub user. Eg: "johnwick"

    :type repository: string
    :param repository: Name of the GitHub repository. Eg: "Awesome-CloudOps-Automation"

    :type pull_request_number: int
    :param pull_request_number: Pull request number. Eg:167

    :type commit_message: str
    :param commit_message: Commit Message

    :rtype: String of details with message of successfully merged branch
    """
    pr_number = int(pull_request_number)
    try:
        user = handle.get_user(owner)
        repo = handle.get_repo(user.login + '/' + repository)
        pull_request = repo.get_pull(pr_number)
        # Merges the PR's head commit into the hard-coded "master" base branch,
        # not into the PR's own base branch (see the note above).
        commit = repo.merge(base="master", head=pull_request.head.sha,
                            commit_message=commit_message)
        # Fixed typo in the returned message ("Successully" -> "Successfully").
        return f"Successfully merged branch with commit SHA- {commit.sha}"
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No such pull number or repository or user found") from e
        if e.status == 409:
            raise Exception("Merge Conflict") from e
        raise e.data
    except Exception as e:
        raise e
================================================
FILE: Github/legos/github_remove_member_from_org/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Github/legos/github_remove_member_from_org/__init__.py
================================================
================================================
FILE: Github/legos/github_remove_member_from_org/github_remove_member_from_org.json
================================================
{
"action_title": "Github Remove Member from Organization",
"action_description": "Remove a member from a Github Organization",
"action_type": "LEGO_TYPE_GITHUB",
"action_entry_function": "github_remove_member_from_org",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_GITHUB","CATEGORY_TYPE_GITHUB_USER", "CATEGORY_TYPE_GITHUB_ORG" ]
}
================================================
FILE: Github/legos/github_remove_member_from_org/github_remove_member_from_org.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from github import GithubException, BadCredentialsException, UnknownObjectException
class InputSchema(BaseModel):
    # Organization from which the member will be removed.
    organization_name: str = Field(
        description='Name of Github Organization. Eg: "unskript"',
        title='Organization Name',
    )
    # GitHub username of the member to remove.
    username: str = Field(
        description='Organization member\'s username. Eg: "jane-mitch-unskript"',
        title='Member\'s Username',
    )
def github_remove_member_from_org_printer(output):
    """Pretty-print the removal status message; None output is skipped."""
    if output is not None:
        pprint.pprint(output)
def github_remove_member_from_org(handle, organization_name:str, username:str)-> str:
    """github_remove_member_from_org removes a member from a GitHub organization.

    :type organization_name: string
    :param organization_name: Name of Github Organization. Eg: "unskript"

    :type username: string
    :param username: Organization member's username. Eg: "jane-mitch-unskript"

    :rtype: Success message string when the user was removed, otherwise None
    """
    outcome = ""
    organization = handle.get_organization(organization_name)
    try:
        member = handle.get_user(username)
        if organization.has_in_members(member):
            # remove_from_members() returns None on success; "outcome" stays ""
            # when the user was not a member of the organization.
            outcome = organization.remove_from_members(member)
    except GithubException as e:
        if e.status == 403:
            raise BadCredentialsException("You need admin access") from e
        if e.status == 404:
            raise UnknownObjectException("No organization or user found") from e
        raise e.data
    except Exception as e:
        raise e
    return f"Successfully removed user {username}" if outcome is None else None
================================================
FILE: Grafana/README.md
================================================
# Grafana Actions
* [Get Grafana Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Grafana/legos/grafana_get_handle/README.md): Get Grafana Handle
* [Grafana List Alerts](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Grafana/legos/grafana_list_alerts/README.md): List of Grafana alerts. Specifying the dashboard ID will show alerts in that dashboard
================================================
FILE: Grafana/__init__.py
================================================
================================================
FILE: Grafana/legos/__init__.py
================================================
================================================
FILE: Grafana/legos/grafana_get_handle/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Hadoop/legos/hadoop_get_handle/__init__.py
================================================
================================================
FILE: Hadoop/legos/hadoop_get_handle/hadoop_get_handle.json
================================================
{
"action_title": "Get Hadoop handle",
"action_description": "Get Hadoop handle",
"action_type": "LEGO_TYPE_HADOOP",
"action_entry_function": "hadoop_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_supports_iteration": false,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_HADOOP"]
}
================================================
FILE: Hadoop/legos/hadoop_get_handle/hadoop_get_handle.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No inputs are required to fetch the Hadoop handle.
    pass
def hadoop_get_handle(handle):
    """hadoop_get_handle returns the Hadoop session handle.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: Hadoop handle.
    """
    # The previous "-> None" return annotation was wrong: the handle is returned.
    return handle
================================================
FILE: Jenkins/Fetch_Jenkins_Build_Logs.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"id": "6e5315be",
"metadata": {},
"source": [
"\n",
"In this RunBook, we will graph MTTR of issues in Jira.
\n", "\n", "
Since many teams track DevOps issues in Jira, this is a great way to understand how quickly issues are getting resolved, and if the MTTR is improving.
\n", "\n", "
First we will get a static pull of all the issues in Jira. This is fine if your data set is small, but we'll also generate the graph dynamically - so that the data pulled from Jira is never \"too big.\"
" ] }, { "cell_type": "markdown", "id": "f0a1f3b5-4494-4533-811b-5dd36e5e4d46", "metadata": { "jupyter": { "source_hidden": false }, "name": "Configure the JQL query", "orderProperties": [], "tags": [], "title": "Configure the JQL query" }, "source": [ "By defining the JQL query in this way, we can reuse the \"get issues from JIRA\" Action with different start and end times to pull different timeframes from Jira.
\n", "\n", "
For the static chart, we use a big start and end time, to pull all the data in.
" ] }, { "cell_type": "code", "execution_count": null, "id": "42d705d5-69c3-4671-9ede-9891f1584aac", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-27T21:48:04.101Z" }, "name": "JQL Query Variable", "orderProperties": [], "tags": [], "title": "JQL Query Variable", "credentialsJson": {} }, "outputs": [], "source": [ "start = start_date\n", "end = end_date\n", "#global jql_query\n", "jql_query=\"\"\n", "def create_query(jira_project, issue_type, new_status, start, end) -> str: \n", " \n", " #global jql_query\n", " return f'project = {jira_project} and issueType = {issue_type} and status changed to {new_status} during (\"{start}\",\"{end}\")'\n", "jql_query = create_query(jira_project, issue_type, new_status, start, end)\n", "print(jql_query)\n" ], "output": {} }, { "cell_type": "markdown", "id": "55af3eea-f53f-41ba-83f5-c0f1c58a8d1f", "metadata": { "jupyter": { "source_hidden": false }, "name": "Query JQL", "orderProperties": [], "tags": [], "title": "Query JQL" }, "source": [ "We;ve created the JQL query - this pre-built Action just pulls the requested data from Jira.:
project = EN and issueType = Bug and status changed to Done during ('2022/01/01','2023/01/08')\n",
"\n", "
This query pulls all bugs from the EN project that were completed from 1/1/2022 - 1/8/2023.
" ] }, { "cell_type": "code", "execution_count": null, "id": "cf3db291-fea3-4dbe-94c4-d15a8d906cd0", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionOutputType": null, "actionRequiredLinesInCode": [], "actionSupportsIteration": false, "actionSupportsPoll": false, "action_modified": false, "action_uuid": "23c4b5c86be9cfdbc7cbfce6d90ed089b7dc61d6dbc219aae3f4cc08862d3934", "continueOnError": false, "credentialsJson": {}, "description": "Use JQL to search all matching issues in Jira. Returns a List of the matching issues IDs/keys", "execution_data": { "last_date_success_run_cell": "2023-02-27T21:48:16.314Z" }, "id": 7, "index": 7, "inputData": [ { "jql": { "constant": false, "value": "jql_query" }, "max_results": { "constant": false, "value": "" } } ], "inputschema": [ { "properties": { "jql": { "description": "Search string to execute in JIRA. Valid JQL expression eg \"project = EN and status in (\"Selected for Development\") AND labels in (beta)\"", "title": "Jira issue search using Jira Query Languagae (JQL)", "type": "string" }, "max_results": { "default": 5, "description": "Max limit on number of matching issues", "title": "Limit number of matching issues", "type": "integer" } }, "required": [ "jql" ], "title": "jira_search_issue", "type": "object" } ], "language": "python", "legotype": "LEGO_TYPE_JIRA", "metadata": { "action_bash_command": false, "action_description": "Use JQL to search all matching issues in Jira. 
Returns a List of the matching issues IDs/keys", "action_entry_function": "jira_search_issue", "action_needs_credential": true, "action_nouns": null, "action_output_type": "ACTION_OUTPUT_TYPE_NONE", "action_supports_iteration": true, "action_supports_poll": true, "action_title": "Search for Jira issues matching JQL queries", "action_type": "LEGO_TYPE_JIRA", "action_verbs": null, "action_version": "1.0.0" }, "name": "Search Jira Issues with JQL", "orderProperties": [ "jql", "max_results" ], "outputParams": { "output_name": "issueList", "output_name_enabled": true }, "printOutput": false, "tags": [ "jira_search_issue" ], "title": "Search Jira Issues with JQL", "uuid": "23c4b5c86be9cfdbc7cbfce6d90ed089b7dc61d6dbc219aae3f4cc08862d3934", "version": "1.0.0" }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from jira import JIRA, Issue\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, List, Dict\n", "import pprint\n", "\n", "pp = pprint.PrettyPrinter(indent=4)\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def legoPrinter(func):\n", " def Printer(*args, **kwargs):\n", " matching_issues = func(*args, **kwargs)\n", " print('\\n')\n", " #for issue in matching_issues:\n", " # print('ID:{}: Summary:{} Description:{}'.format(\n", " # issue.key, issue.fields.summary, issue.fields.description))\n", " #print(issue)\n", "\n", " return matching_issues\n", " return Printer\n", "\n", "\n", "@legoPrinter\n", "@beartype\n", "def jira_search_issue(handle: JIRA, jql: str, max_results: int = 0) -> List:\n", " \"\"\"jira_search_issue get Jira issues matching JQL queries.\n", " :type jql: str\n", " :param jql: Search string to execute in JIRA.\n", "\n", " :type max_results: int\n", " :param max_results: Max limit on number of matching issues\n", "\n", " :rtype: Jira issues matching JQL queries\n", " \"\"\"\n", " print(\"jql search lego\",jql)\n", " matching_issues = 
handle.search_issues(jql, maxResults=max_results)\n", "\n", " return matching_issues\n", "\n", "\n", "def unskript_default_printer(output):\n", " if isinstance(output, (list, tuple)):\n", " for item in output:\n", " print(f'item: {item}')\n", " elif isinstance(output, dict):\n", " for item in output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(output)\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(inputParamsJson='''{\n", " \"jql\": \"jql_query\"\n", " }''')\n", "\n", "task.configure(outputName=\"issueList\")\n", "task.configure(printOutput=False)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(jira_search_issue, lego_printer=unskript_default_printer, hdl=hdl, args=args)" ], "output": {} }, { "cell_type": "markdown", "id": "48f39e74-8285-4252-ac8c-fce2ae30c841", "metadata": { "jupyter": { "source_hidden": false }, "name": "Data into a Dict", "orderProperties": [], "tags": [], "title": "Data into a Dict" }, "source": [ "In this Action - we convert the object from Jira into a Dict, and we add the elapsed time.
\n", "\n", "
This is the time from the bug being opened to the status changed to closed. We save this as a timedelta, but also convert the timedelta into hours - adding the days (*24) and seconds (/3600) of the timedelta so that we can see how many hours the ticket was open.
\n", "\n", "
We also count the number of issues in the Dict and print that value.
" ] }, { "cell_type": "code", "execution_count": null, "id": "76777827-9c79-4965-96b7-871bfe9cbf0d", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-27T20:37:19.664Z" }, "jupyter": { "source_hidden": true }, "name": "data into dict", "orderProperties": [], "tags": [], "title": "data into dict", "credentialsJson": {} }, "outputs": [], "source": [ "from datetime import datetime\n", "def create_dict(issueList):\n", "\n", " issue_data = {}\n", " counter =0\n", " for issue in issueList:\n", " counter +=1\n", " create_time = datetime.strptime(issue.fields.created, '%Y-%m-%dT%H:%M:%S.%f%z')\n", " done_time = datetime.strptime(issue.fields.updated, '%Y-%m-%dT%H:%M:%S.%f%z')\n", " elapsed_time = done_time-create_time\n", " elapsed_time_hours = round(elapsed_time.days*24,0) +round(elapsed_time.seconds/3600,1)\n", " #print(\"elapsed\", elapsed_time)\n", " assignee = issue.fields.assignee\n", " if hasattr(issue.fields.assignee,'displayName'):\n", " assignee = issue.fields.assignee.displayName\n", " else:\n", " assignee = \"Not assigned\"\n", " issue_data[issue.key] = {#\"summary\": issue.fields.summary, \n", " #\"description\": issue.fields.description,\n", " \"reporter\":issue.fields.reporter.displayName,\n", " \"status\":issue.fields.status.name,\n", " \"issueType\":issue.fields.issuetype.name,\n", " \"project\":issue.fields.project.name,\n", " \"create_time\":create_time,\n", " \"done_time\":done_time,\n", " \"elapsed_time\":elapsed_time,\n", " \"elapsed_time_hours\":elapsed_time_hours,\n", " \"assignee\":assignee\n", " }\n", " print(\"counter\", counter)\n", " return issue_data\n", "issue_data = create_dict(issueList)" ], "output": {} }, { "cell_type": "markdown", "id": "4e25ad3f-0264-4ec4-a1cc-0d193edcc947", "metadata": { "jupyter": { "source_hidden": false }, "name": "Create a data frame and a graph", "orderProperties": [], "tags": [], "title": "Create a data frame and a graph" }, "source": [ "this step is doing a lot.
\n", "\n", "
The above steps are great if your dataset isn't very large. But what if you have thousands of issues? We don't want to make epic JQL queries, and then also hammer the RunBook with a huge amount of data.
\n", "\n", "
Let's pull the data every time we change the graph. This is REALLY useful for time sensitive data (imagine you need time sensitive data, and pulling a whole day's of data takes forever.). Now, pull the small subset you need in real time when you create the chart
" ] }, { "cell_type": "code", "execution_count": null, "id": "5d939c27-a6ad-4f57-a5c4-b23545187ad8", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-27T22:45:21.864Z" }, "name": "live data interactive chart", "orderProperties": [], "tags": [], "title": "live data interactive chart", "credentialsJson": {} }, "outputs": [], "source": [ "import json\n", "#create a dataframe from the Jira export\n", "livedata = data\n", "\n", "#data.tail\n", "\n", "def weekdflive(startDay, dayCount, dataframe):\n", " startDay = datetime.combine(startDay, datetime.min.time())\n", " startDay =startDay.replace(tzinfo=timezone.utc)\n", " endDay =startDay+ dt.timedelta(days=dayCount)\n", " #pull data from JIRA\n", " startJira = startDay.strftime(\"%Y/%m/%d\")\n", " endJira = endDay.strftime(\"%Y/%m/%d\")\n", " jql_query =create_query(jira_project, issue_type, new_status, startJira, endJira)\n", " print(\"jql_query\",jql_query)\n", " jsonInput = {\"jql\":jql_query}\n", " stringifiedInput = json.dumps(jsonInput)\n", " print(\"stringifiedInput\",stringifiedInput, type(stringifiedInput))\n", " #inputParamsJson1 = f'''{\"jql\":{{jql_query}}}'''\n", " #print(\"inputParamsJson1\", inputParamsJson1)\n", " \n", "\n", "\n", " task.configure(inputParamsJson='''{\n", " \"jql\": \"jql_query\"\n", " }''')\n", " task.configure(outputName=\"issueList1\")\n", " (err, hdl, args) = task.validate(vars=vars())\n", " task.execute(jira_search_issue, lego_printer=unskript_default_printer, hdl=hdl, args=args)\n", "\n", " print(\"issueList1\", len(issueList1))\n", " live_issue_data = create_dict(issueList1)\n", " livedata=\"\"\n", " livedata = pd.DataFrame.from_dict(live_issue_data)\n", " livedata = livedata.T\n", " print(\"livedata\" ,livedata.size)\n", " \n", " \n", " \n", " weekdf= dataframe[(dataframe[\"create_time\"] >= startDay)&(dataframe[\"create_time\"] <= endDay) ][\"elapsed_time_hours\"].value_counts()\n", " weektitle = \"Defect status by creation date\"\n", 
" if weekdf.empty:\n", " startDay =dt.datetime(2021, 1, 1,0,0,0)\n", " startDay =startDay.replace(tzinfo=timezone.utc)\n", " dayCount=730\n", " endDay =startDay+ dt.timedelta(days=dayCount)\n", " weektitle = \"no data for this week.\"\n", " weekdf= dataframe[(dataframe[\"create_time\"] >= startDay)&(dataframe[\"create_time\"] <= endDay) ][\"elapsed_time_hours\"].value_counts(bins=4, sort=False)\n", " else:\n", " issueCount = weekdf.sum(0)\n", " numberofBins = 4\n", " if issueCount < 4:\n", " numberofBins = 2\n", " if issueCount > 15:\n", " numberofBins = 8\n", " weekdf= dataframe[(dataframe[\"create_time\"] >= startDay)&(dataframe[\"create_time\"] <= endDay) ][\"elapsed_time_hours\"].value_counts(bins=numberofBins, sort=False)\n", " #print(\"count\", weekdf.sum(0))\n", " return weekdf\n", "\n", "\n", "def time_plotlive(startDay, dayCount, dataframe):\n", " fig = Figure(figsize=(10, 6))\n", " fig.subplots_adjust(bottom=0.45)\n", "\n", " ax = fig.subplots()\n", " ax.xaxis.set_tick_params(labelsize=20)\n", " df1 = weekdflive(startDay, dayCount, dataframe)\n", " FigureCanvas(fig) \n", " df1.plot.bar(x=\"x\", y=\"counts\", ax=ax, title=\"MTTR to closing issues\")\n", " return fig\n", "\n", "\n", "\n", "#build the chart\n", "\n", "#get all our date-time variables correctly formatted with a timezone.\n", "sliderstart = dt.datetime(2022, 1, 1,0,0,0)\n", "sliderstart =sliderstart.replace(tzinfo=timezone.utc)\n", "sliderend = dt.datetime.now()\n", "sliderend =sliderend.replace(tzinfo=timezone.utc)\n", "slidervalue = dt.datetime(2023, 1, 1,0,0,0)\n", "slidervalue =slidervalue.replace(tzinfo=timezone.utc)\n", "#print(\"sliderstart\",sliderstart)\n", "\n", "\n", "#CREATE SLIDERS\n", "startDay = pn.widgets.DateSlider(name='Date Slider', start=sliderstart, end=sliderend, value=slidervalue)\n", "\n", "dayCount = pn.widgets.IntSlider(name='number of days', value=7, start=1, end=180, step = 7)\n", "interactive = pn.bind(time_plotlive, startDay=startDay, dayCount=dayCount, dataframe 
= livedata)\n", "first_app = pn.Column(startDay, dayCount, interactive)\n", "first_app\n" ], "output": {} } ], "metadata": { "execution_data": { "runbook_name": "Jira Visualize Issue Time to Resolution", "parameters": [ "AMI_Id", "Region" ] }, "kernelspec": { "display_name": "unSkript (Build: 904)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "end_date": { "default": "2023/02/22", "description": "End Date for search range", "title": "end_date", "type": "string" }, "issue_type": { "default": "Bug", "description": "Jira issueType to query", "title": "issue_type", "type": "string" }, "jira_project": { "default": "EN", "description": "Jira Project Name", "title": "jira_project", "type": "string" }, "new_status": { "default": "Done", "description": "Status change to search for in Jira", "title": "new_status", "type": "string" }, "start_date": { "default": "2022/01/01", "description": "Start Date for search range.", "title": "start_date", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "e8899eb02dfbc033aab5733bdae1bd213fa031d40331094008e8673d99ebab63" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Jira/jira_visualize_time_to_resolution.json ================================================ { "name": "Jira Visualize Issue Time to Resolution", "description": "Using the Panel Library - visualize the time it takes for issues to close over a specifict timeframe", "uuid": "1d6f5420dc07075e60bb98018e5447658679ab5b50d7247b4385395a0b6e2989", "icon": "CONNECTOR_TYPE_JIRA", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_JIRA" ], "version": "1.0.0" } 
================================================ FILE: Jira/legos/__init__.py ================================================ ================================================ FILE: Jira/legos/jira_add_comment/README.md ================================================ [
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Jira/legos/jira_add_comment/__init__.py
================================================
================================================
FILE: Jira/legos/jira_add_comment/jira_add_comment.json
================================================
{
"action_title": "Jira Add Comment",
"action_description": "Add a Jira Comment",
"action_type": "LEGO_TYPE_JIRA",
"action_entry_function": "jira_add_comment",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_INT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_JIRA"]
}
================================================
FILE: Jira/legos/jira_add_comment/jira_add_comment.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict, Optional
from pydantic import BaseModel, Field
from jira.client import JIRA
class InputSchema(BaseModel):
    # Key of the issue to comment on.
    issue_id: str = Field(
        title='JIRA Issue ID',
        description='Issue ID. Eg EN-1234'
    )
    # Body text of the comment to add.
    comment: str = Field(
        title='Comment',
        description='Comment to add in Jira Issue'
    )
    # Optional role/group restriction on who may view the comment.
    visibility: Optional[Dict[str, str]] = Field(
        None,
        title='Visibility',
        description='''a dict containing two entries: "type" and "value".
        "type" is 'role' (or 'group' if the Jira server has configured comment visibility for groups)
        "value" is the name of the role (or group) to which viewing of this comment
        will be restricted.'''
    )
    # Jira Service Desk internal-comment flag.
    is_internal: Optional[bool] = Field(
        False,
        title='Internal',
        description=('True marks the comment as \'Internal\' in Jira Service Desk '
                     '(Default: ``False``)')
    )
def jira_add_comment_printer(output):
    """Pretty-print the new comment id; None output is skipped."""
    if output is not None:
        pprint.pprint(output)
def jira_add_comment(hdl: JIRA,
                     issue_id: str,
                     comment: str,
                     visibility: Dict[str, str] = None,
                     is_internal: bool = False) -> int:
    """jira_add_comment adds a comment to a Jira issue and returns the comment id.

    :type hdl: JIRA
    :param hdl: Jira handle.

    :type issue_id: str
    :param issue_id: Issue ID.

    :type comment: str
    :param comment: Comment to add in Jira Issue.

    :type visibility: Dict[str, str]
    :param visibility: a dict containing two entries: "type" and "value".
        "type" is 'role' (or 'group' if the Jira server has configured
        comment visibility for groups)
        "value" is the name of the role (or group) to which viewing of
        this comment will be restricted.

    :type is_internal: bool
    :param is_internal: True marks the comment as 'Internal' in Jira
        Service Desk (Default: ``False``)

    :rtype: Jira comment id (int)
    """
    issue = hdl.issue(issue_id)
    # Use a distinct name so the "comment" parameter is not shadowed.
    new_comment = hdl.add_comment(issue, comment, visibility=visibility,
                                  is_internal=is_internal)
    return int(new_comment.id)
================================================
FILE: Jira/legos/jira_assign_issue/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Jira/legos/jira_get_handle/__init__.py
================================================
================================================
FILE: Jira/legos/jira_get_handle/jira_get_handle.json
================================================
{
"action_title": "Get Jira SDK Handle",
"action_description": "Get Jira SDK Handle",
"action_type": "LEGO_TYPE_JIRA",
"action_entry_function": "jira_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false
}
================================================
FILE: Jira/legos/jira_get_handle/jira_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No inputs are required to fetch the Jira handle.
    pass
def jira_get_handle(handle):
    """jira_get_handle returns the Jira connection handle.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: Jira handle.
    """
    # Docstring previously said "postgresql Handle" (copy-paste error); this
    # action simply passes the validated Jira handle through.
    return handle
================================================
FILE: Jira/legos/jira_get_issue/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Jira/legos/jira_get_issue/__init__.py
================================================
================================================
FILE: Jira/legos/jira_get_issue/jira_get_issue.json
================================================
{
"action_title": "Get Jira Issue Info",
"action_description": "Get Issue Info from Jira API: description, labels, attachments",
"action_type": "LEGO_TYPE_JIRA",
"action_entry_function": "jira_get_issue",
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_JIRA"]
}
================================================
FILE: Jira/legos/jira_get_issue/jira_get_issue.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from jira.client import JIRA
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    # Key of the issue whose details are fetched.
    issue_id: str = Field(
        title='JIRA Issue ID',
        description='Issue ID. Eg EN-1234'
    )
def jira_get_issue_printer(output):
    """Pretty-print the issue dict returned by jira_get_issue; None is skipped."""
    if output is not None:
        pp.pprint(output)
def jira_get_issue(hdl: JIRA, issue_id: str) -> dict:
    """jira_get_issue Get Jira Issue Info

       :type issue_id: str
       :param issue_id: Issue ID, e.g. EN-1234.

       :rtype: dict with the raw Jira issue fields (description, labels, attachments, ...)
    """
    fetched_issue = hdl.issue(issue_id)
    return fetched_issue.raw
================================================
FILE: Jira/legos/jira_get_issue_status/README.md
================================================
# Get Jira Issue Status
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Jira/legos/jira_get_issue_status/__init__.py
================================================
================================================
FILE: Jira/legos/jira_get_issue_status/jira_get_issue_status.json
================================================
{
"action_title": "Get Jira Issue Status",
"action_description": "Get Issue Status from Jira API",
"action_type": "LEGO_TYPE_JIRA",
"action_entry_function": "jira_get_issue_status",
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_JIRA"]
}
================================================
FILE: Jira/legos/jira_get_issue_status/jira_get_issue_status.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from jira.client import JIRA
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    # Jira issue key in the form <PROJECT>-<number>, e.g. "EN-1234".
    issue_id: str = Field(
        title='Issue ID',
        description='Issue ID'
    )
def jira_get_issue_status_printer(output):
    """Pretty-print the status string returned by jira_get_issue_status; no-op for None."""
    if output is not None:
        pp.pprint(output)
def jira_get_issue_status(hdl: JIRA, issue_id: str):
    """jira_get_issue_status get issue status

       :type issue_id: str
       :param issue_id: Issue ID.

       :rtype: name of the issue's current workflow status (str)
    """
    return hdl.issue(issue_id).fields.status.name
================================================
FILE: Jira/legos/jira_issue_change_status/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Jira/legos/jira_search_issue/__init__.py
================================================
================================================
FILE: Jira/legos/jira_search_issue/jira_search_issue.json
================================================
{
"action_title": "Search for Jira issues matching JQL queries",
"action_description": "Use JQL to search all matching issues in Jira. Returns a List of the matching issues IDs/keys",
"action_type": "LEGO_TYPE_JIRA",
"action_entry_function": "jira_search_issue",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_example": "[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kafka/legos/kafka_broker_health_check/__init__.py
================================================
================================================
FILE: Kafka/legos/kafka_broker_health_check/kafka_broker_health_check.json
================================================
{
"action_title": "Get Kafka broker health",
"action_description": "Checks the health of the Kafka brokers by determining if the Kafka producer can establish a connection with the bootstrap brokers of a Kafka cluster.",
"action_type": "LEGO_TYPE_KAFKA",
"action_entry_function": "kafka_broker_health_check",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Kafka/legos/kafka_broker_health_check/kafka_broker_health_check.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from kafka import KafkaProducer, KafkaConsumer
from typing import Tuple
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No user-supplied inputs: the check runs purely against the connected cluster.
    pass
def kafka_broker_health_check_printer(output):
    """Render the (status, issues) tuple produced by kafka_broker_health_check."""
    healthy, issues = output
    if healthy:
        print("All brokers are connected and healthy!")
        return
    print("Issues detected with brokers:\n")
    for issue in issues:
        print(f"Issue Type: {issue['issue_type']}")
        print(f"Description: {issue['description']}\n")
def kafka_broker_health_check(handle) -> Tuple:
    """
    Checks the health of the Kafka brokers by determining if the Kafka producer
    can establish a connection with the bootstrap brokers of a Kafka cluster.

    :type handle: KafkaProducer
    :param handle: Handle containing the KafkaProducer instance.

    :rtype: Tuple of (status, issues) — issues is None when every broker is reachable.
    """
    # bootstrap_connected() reports whether the client reached any bootstrap broker.
    if handle.bootstrap_connected():
        return (True, None)
    broker_issue = {
        'issue_type': 'Broker',
        'description': 'Unable to connect to bootstrap brokers.'
    }
    return (False, [broker_issue])
================================================
FILE: Kafka/legos/kafka_check_in_sync_replicas/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kafka/legos/kafka_check_lag_change/__init__.py
================================================
================================================
FILE: Kafka/legos/kafka_check_lag_change/kafka_check_lag_change.json
================================================
{
"action_title": "Kafka check lag change",
"action_description": "This action checks if the lag for consumer groups is not changing for a threshold number of hours.\n",
"action_type": "LEGO_TYPE_KAFKA",
"action_entry_function": "kafka_check_lag_change",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Kafka/legos/kafka_check_lag_change/kafka_check_lag_change.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from typing import Tuple, Optional
from concurrent.futures import ThreadPoolExecutor, as_completed
from kafka import KafkaAdminClient, KafkaConsumer, TopicPartition
from pydantic import BaseModel, Field
from tabulate import tabulate
import time
class InputSchema(BaseModel):
    # Empty string means "check every consumer group in the cluster".
    group_id: Optional[str] = Field(
        '',
        description='Consumer group ID to which this consumer belongs',
        title='Consumer group ID',
    )
    # Hours a partition's lag must remain unchanged before it is reported.
    threshold: Optional[int] = Field(
        3,
        description="The number of hours to check if the lag hasn't changed.",
        title='Threshold (in hours)',
    )
def kafka_check_lag_change_printer(output):
    """Print a human-readable summary of kafka_check_lag_change's (status, issues) output."""
    status, issues = output
    if status:
        print("All consumer groups are maintaining their lags!")
        return
    print("Lag issues detected:")
    columns = ['Consumer Group', 'Topic', 'Partition', 'Description']
    rows = []
    for issue in issues:
        rows.append((issue['consumer_group'], issue['topic'],
                     issue['partition'], issue['description']))
    print(tabulate(rows, headers=columns, tablefmt='grid'))
# Module-level cache of the lag observed at the previous check, keyed by
# "<group>-<topic>-<partition>" (see fetch_lag); value is (timestamp, lag).
# NOTE(review): in-process only — this state is lost whenever the interpreter
# restarts, so the "unchanged for N hours" window only spans one process lifetime.
prev_lags = {}
def fetch_lag(handle, group_id, topic_partitions, current_time, threshold):
    """Collect lag issues for one consumer group.

    Compares each partition's current lag against the value cached in the
    module-level prev_lags store and reports partitions whose non-zero lag
    has stayed constant for at least `threshold` hours.
    """
    issues = []
    # Reuse the bootstrap servers recorded on the unSkript handle.
    consumer = KafkaConsumer(bootstrap_servers=handle.config['bootstrap_servers'], group_id=group_id)
    try:
        for tp in topic_partitions:
            latest = consumer.end_offsets([tp])[tp]
            committed_offset = consumer.committed(tp) or 0
            lag = latest - committed_offset
            if lag == 0:
                continue
            cache_key = f"{group_id}-{tp.topic}-{tp.partition}"
            cached = prev_lags.get(cache_key)
            if cached is None:
                # First time we see lag on this partition: start the clock.
                prev_lags[cache_key] = (current_time, lag)
                continue
            cached_ts, cached_lag = cached
            if cached_lag != lag:
                # Lag moved — reset the observation window.
                prev_lags[cache_key] = (current_time, lag)
            elif (current_time - cached_ts) >= threshold * 3600:
                issues.append({
                    'consumer_group': group_id,
                    'topic': tp.topic,
                    'partition': tp.partition,
                    'description': f"Lag hasn't changed for {threshold} hours. Current Lag: {lag}"
                })
    finally:
        consumer.close()
    return issues
def kafka_check_lag_change(handle, group_id: str = "", threshold: int = 3) -> Tuple:
    """
    kafka_check_lag_change checks if the lag for consumer groups is not changing for X hours.

    :param handle: Object of type unSkript KAFKA Connector.
    :param group_id: Consumer group ID; empty string means every group in the cluster.
    :param threshold: The number of hours to check if the lag hasn't changed.
    :return: Tuple containing a status and an optional list of issues with lag.
    """
    now = time.time()
    admin_client = KafkaAdminClient(bootstrap_servers=handle.config['bootstrap_servers'])
    if group_id:
        groups = [group_id]
    else:
        groups = [entry[0] for entry in admin_client.list_consumer_groups()]
    all_issues = []
    with ThreadPoolExecutor(max_workers=10) as pool:
        pending = []
        for grp in groups:
            # A short-lived consumer is used purely for topic/partition discovery.
            probe = KafkaConsumer(bootstrap_servers=handle.config['bootstrap_servers'], group_id=grp)
            discovered = [
                TopicPartition(topic, part)
                for topic in probe.topics()
                for part in probe.partitions_for_topic(topic)
            ]
            probe.close()
            if discovered:
                pending.append(pool.submit(fetch_lag, handle, grp, discovered, now, threshold))
        for done in as_completed(pending):
            all_issues.extend(done.result())
    if all_issues:
        return (False, all_issues)
    return (True, None)
================================================
FILE: Kafka/legos/kafka_check_offline_partitions/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kafka/legos/kafka_get_committed_messages_count/__init__.py
================================================
================================================
FILE: Kafka/legos/kafka_get_committed_messages_count/kafka_get_committed_messages_count.json
================================================
{
"action_title": "Kafka get count of committed messages",
"action_description": "Fetches the count of committed messages (consumer offsets) for a specific consumer group and its topics.",
"action_type": "LEGO_TYPE_KAFKA",
"action_entry_function": "kafka_get_committed_messages_count",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":["CATEGORY_TYPE_INFORMATION" , "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_KAFKA"]
}
================================================
FILE: Kafka/legos/kafka_get_committed_messages_count/kafka_get_committed_messages_count.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from kafka import KafkaConsumer, TopicPartition, KafkaAdminClient
from typing import Dict, Optional
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Default '' matches the entry function's `group_id: str = ""` and means
    # "inspect every consumer group". The original used Field(...), which made
    # a field documented and typed as Optional mandatory in the UI.
    group_id: Optional[str] = Field('', description='Consumer group ID to which this consumer belongs', title='Consumer group ID')
def kafka_get_committed_messages_count_printer(output):
    """Print committed-message counts nested by consumer group, topic and partition."""
    if output is None:
        print("No data found to get kafka committed messages count ! ")
        return
    for group_id, topics in output.items():
        print(f"Group ID: {group_id}")
        for topic_name, partitions in topics.items():
            print(f" Topic: {topic_name}")
            for partition, number_of_messages in partitions.items():
                print(f" Partition {partition}: {number_of_messages} committed messages")
        print()
def kafka_get_committed_messages_count(handle, group_id: str = "") -> Dict:
    """
    Fetches committed messages (consumer offsets) for all consumer groups and topics,
    or for a specific group if provided.

    :type handle: object
    :param handle: unSkript Kafka connector handle; only its bootstrap_servers config is read.
    :type group_id: str
    :param group_id: Optional consumer group ID; empty string means every group.
    :rtype: Dict mapping group -> topic -> partition -> committed message count.
    """
    admin_client = KafkaAdminClient(bootstrap_servers=handle.config['bootstrap_servers'])
    committed_messages_count = {}
    if group_id:
        consumer_groups = [group_id]
    else:
        # Fetch all consumer groups
        try:
            consumer_groups_info = admin_client.list_consumer_groups()
        except Exception as e:
            print(f"An error occured while fetching consumer groups:{e}")
            return {}
        consumer_groups = [group[0] for group in consumer_groups_info]
    for group in consumer_groups:
        # Create a consumer for each group to fetch topics
        try:
            consumer = KafkaConsumer(bootstrap_servers=handle.config['bootstrap_servers'], group_id=group)
        except Exception as e:
            print(f"An error occurred while fetching topics in consumer group {group} : {e}")
            continue
        try:
            try:
                topics = consumer.topics()
            except Exception as e:
                print(f"An error occurred while fetching topics in consumer group {group} : {e}")
                continue
            for topic in topics:
                try:
                    partitions = consumer.partitions_for_topic(topic)
                except Exception as e:
                    print(f"An error occurred while fetching partitions for consumer group {group} and topic {topic} : {e}")
                    continue
                for partition in partitions:
                    # BUG FIX: the original used a bare `except:` here whose handler
                    # printed `e` — a name that is unbound inside that handler in
                    # Python 3, so any failure raised NameError instead of logging.
                    try:
                        tp = TopicPartition(topic, partition)
                        # Fetch committed offset for each partition
                        committed_offset = consumer.committed(tp)
                    except Exception as e:
                        print(f"An error occurred while fetching partition info for consumer group {group} and topic {topic} : {e}")
                        continue
                    if committed_offset is not None:
                        # If there's a committed offset, calculate the number of messages
                        earliest_offset = consumer.beginning_offsets([tp])[tp]
                        number_of_messages = committed_offset - earliest_offset
                    else:
                        # If no committed offset, assume 0 messages
                        number_of_messages = 0
                    committed_messages_count.setdefault(group, {}).setdefault(topic, {})[partition] = number_of_messages
        finally:
            # BUG FIX: always release the consumer — the original leaked it when
            # topics() failed (continue before close) or an error propagated.
            consumer.close()
    return committed_messages_count
================================================
FILE: Kafka/legos/kafka_get_handle/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kafka/legos/kafka_get_topic_health/__init__.py
================================================
================================================
FILE: Kafka/legos/kafka_get_topic_health/kafka_get_topic_health.json
================================================
{
"action_title": "Kafka get topic health",
"action_description": "This action fetches the health and total number of messages for the specified topics.",
"action_type": "LEGO_TYPE_KAFKA",
"action_entry_function": "kafka_get_topic_health",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":["CATEGORY_TYPE_INFORMATION" , "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_KAFKA"]
}
================================================
FILE: Kafka/legos/kafka_get_topic_health/kafka_get_topic_health.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from kafka import TopicPartition, KafkaConsumer, KafkaAdminClient
from typing import Dict, Optional
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Defaults mirror the entry function: empty/None values mean "all groups" /
    # "all topics". The original used Field(...), which made both fields mandatory
    # even though they are typed Optional and the entry function has defaults.
    group_id: Optional[str] = Field('', description='Consumer group ID to which this consumer belongs', title='Consumer group ID')
    topics: Optional[list] = Field(None, description='List of topic names.', title='List of topics')
def kafka_get_topic_health_printer(output):
    """Display per-partition message counts and an existence flag for each group/topic."""
    if output is None:
        print("No data found for the Kafka topic health!")
        return
    for group_id, topics in output.items():
        print(f"Group ID: {group_id}")
        for topic_name, partitions in topics.items():
            print(f" Topic: {topic_name}")
            for partition, info in partitions.items():
                # Convert the boolean flag into a Yes/No label for display.
                exists_label = "Yes" if info["topic_exists"] else "No"
                print(f" Partition {partition}: {info['number_of_messages']} messages, Topic exists: {exists_label}")
        print()
def kafka_get_topic_health(handle, group_id: str = "", topics: list = None) -> Dict:
    """
    kafka_get_topic_health fetches the health and total number of messages for the specified topics.

    :type handle: object
    :param handle: Handle containing the KafkaConsumer instance.
    :type group_id: str
    :param group_id: Consumer group ID; empty string means every group.
    :type topics: list
    :param topics: List of topic names; None/empty means every topic of the group.
        (BUG FIX: the original used a mutable default `[]`.)
    :rtype: Dictionary containing the health status and number of messages by topic and partition
    """
    admin_client = KafkaAdminClient(bootstrap_servers=handle.config['bootstrap_servers'])
    topic_health_info = {}
    try:
        if not group_id:
            consumer_groups_info = admin_client.list_consumer_groups()
            consumer_groups = [group[0] for group in consumer_groups_info]
        else:
            consumer_groups = [group_id]
    except Exception as e:
        print(f"Failed to list consumer groups: {e}")
        return {}
    for group in consumer_groups:
        consumer = KafkaConsumer(bootstrap_servers=handle.config['bootstrap_servers'], group_id=group)
        try:
            group_topics = topics if topics else list(consumer.topics())
            for topic in group_topics:
                partitions = consumer.partitions_for_topic(topic)
                if not partitions:
                    # No partition metadata — record the topic as missing/unhealthy.
                    topic_health_info.setdefault(group, {})[topic] = {"-1": {"number_of_messages": 0, "topic_exists": False}}
                    continue
                for partition in partitions:
                    try:
                        tp = TopicPartition(topic, partition)
                        earliest_offset = consumer.beginning_offsets([tp])[tp]
                        latest_offset = consumer.end_offsets([tp])[tp]
                        number_of_messages = latest_offset - earliest_offset
                    except Exception as e:
                        print(f"Failed to fetch offsets for partition {partition} of topic {topic} in group {group}: {e}")
                        continue
                    topic_health_info.setdefault(group, {}).setdefault(topic, {})[partition] = {
                        "number_of_messages": number_of_messages,
                        "topic_exists": True
                    }
        finally:
            # BUG FIX: close the consumer even when topic discovery or an
            # unguarded call raises; the original leaked it on that path.
            consumer.close()
    return topic_health_info
================================================
FILE: Kafka/legos/kafka_get_topics_with_lag/README.md
================================================
# Kafka get topics with lag
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kafka/legos/kafka_get_topics_with_lag/__init__.py
================================================
================================================
FILE: Kafka/legos/kafka_get_topics_with_lag/kafka_get_topics_with_lag.json
================================================
{
"action_title": "Kafka get topics with lag",
"action_description": "This action fetches the topics with lag in the Kafka cluster.",
"action_type": "LEGO_TYPE_KAFKA",
"action_entry_function": "kafka_get_topics_with_lag",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_KAFKA"]
}
================================================
FILE: Kafka/legos/kafka_get_topics_with_lag/kafka_get_topics_with_lag.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from kafka import KafkaConsumer, TopicPartition
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from kafka.admin import KafkaAdminClient
import time
class InputSchema(BaseModel):
    # Default '' matches the entry function and means "every consumer group".
    # The original used Field(...), making a field typed Optional mandatory.
    group_id: Optional[str] = Field('', description='Consumer group ID', title='Consumer group ID')
    # Minimum lag growth (in messages) between the two snapshots to report.
    threshold: Optional[int] = Field(
        10, description='The threshold on the difference between 2 sample sets of lag data collected.', title='Threshold (no. of messages)'
    )
    # Seconds to wait between the two lag snapshots.
    sliding_window_interval: Optional[int] = Field(
        30, description='The cadence (in seconds) at which the lag data needs to be collected', title='Sliding window interval'
    )
def kafka_get_topics_with_lag_printer(output):
    """
    Render the (status, topics_with_lag) tuple from kafka_get_topics_with_lag.
    """
    status, topics_with_lag = output
    if status:
        print("None of the topics are experiencing a lag")
        return
    # Only print the report header when there actually is a report (the original
    # printed it even on the healthy path, directly above the "no lag" message).
    print("Topics with lag:")
    for item in topics_with_lag:
        # BUG FIX: the entry function stores the lag growth under the key
        # 'incremental'; the original printer read the nonexistent key 'lag'
        # and raised KeyError on every real report.
        print(f"Group '{item['group']}' | Topic '{item['topic']}' | Partition {item['partition']}: {item['incremental']} lag (no. of messages)")
def kafka_get_topics_with_lag(handle, group_id: str = "", threshold: int = 10, sliding_window_interval: int = 30) -> Tuple:
    """
    kafka_get_topics_with_lag fetches the topics with lag in the Kafka cluster.

    Takes two snapshots of consumer lag `sliding_window_interval` seconds apart and
    reports every group/topic/partition whose lag grew by more than `threshold`.

    :type handle: KafkaProducer
    :param handle: Handle containing the KafkaProducer instance.
    :type group_id: str
    :param group_id: Consumer group ID; empty string means every group.
    :type threshold: int, optional
    :param threshold: Lag threshold for alerting.
    :type sliding_window_interval: int, optional
    :param sliding_window_interval: Seconds to wait between the two lag snapshots.
    :rtype: Status and a List of objects with topics with lag information.
    """
    result = []
    admin_client = KafkaAdminClient(bootstrap_servers=handle.config['bootstrap_servers'])
    if group_id:
        consumer_groups = [group_id]
    else:
        consumer_groups = [group[0] for group in admin_client.list_consumer_groups()]
    # cached_kafka_info stores the kafka info like groups, topics, partitions so the
    # second snapshot only has to re-read end_offsets/committed.
    # Its organized as groups->consumer/topics->partitions
    cached_kafka_info = {}
    first_snapshot = {}
    for group in consumer_groups:
        consumer = KafkaConsumer(bootstrap_servers=handle.config['bootstrap_servers'], group_id=group)
        if consumer is None:
            continue
        cached_kafka_info[group] = {'consumer': consumer, 'topics': {}}
        try:
            topic = None
            partition = None
            for topic in consumer.topics():
                partitions = consumer.partitions_for_topic(topic)
                # BUG FIX: the original replaced the whole 'topics' dict on each
                # iteration, so only the LAST topic survived to the second pass.
                cached_kafka_info[group]['topics'][topic] = partitions
                for partition in partitions:
                    tp = TopicPartition(topic, partition)
                    end_offset = consumer.end_offsets([tp])[tp]
                    committed = consumer.committed(tp)
                    # Handle the case where committed is None
                    first_snapshot[f'{group}:{topic}:{partition}'] = end_offset - (committed if committed is not None else 0)
        except Exception as e:
            print(f'First Iteration: An error occurred:{e}, group {group}, topic {topic}, partition {partition}')
    # Second iteration
    time.sleep(sliding_window_interval)
    # BUG FIX: the original appended the SAME dict object for both snapshots, so
    # the two samples were always identical and no lag was ever detected.
    second_snapshot = {}
    for group, value in cached_kafka_info.items():
        consumer = value.get('consumer')
        if consumer is None:
            continue
        topics = value.get('topics')
        try:
            topic = None
            partition = None
            for topic, partitions in topics.items():
                for partition in partitions:
                    tp = TopicPartition(topic, partition)
                    end_offset = consumer.end_offsets([tp])[tp]
                    committed = consumer.committed(tp)
                    # Handle the case where committed is None
                    second_snapshot[f'{group}:{topic}:{partition}'] = end_offset - (committed if committed is not None else 0)
        except Exception as e:
            print(f'Second Iteration: An error occurred:{e}, group {group}, topic {topic}, partition {partition}')
        consumer.close()
    for key, old_lag in first_snapshot.items():
        # Get the value from the second sample, if present
        new_lag = second_snapshot.get(key)
        if new_lag is None:
            continue
        if new_lag - old_lag > threshold:
            group_name, topic_name, partition_name = key.split(":")
            result.append({"group": group_name, "topic": topic_name, "partition": partition_name, "incremental": new_lag - old_lag})
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: Kafka/legos/kafka_publish_message/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kafka/legos/kafka_topic_partition_health_check/__init__.py
================================================
================================================
FILE: Kafka/legos/kafka_topic_partition_health_check/kafka_topic_partition_health_check.json
================================================
{
"action_title": "Get Kafka topic partition health",
"action_description": "Checks the health of the Kafka topics and their partitions.This check checks if the topics have any partitions at all.",
"action_type": "LEGO_TYPE_KAFKA",
"action_entry_function": "kafka_topic_partition_health_check",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Kafka/legos/kafka_topic_partition_health_check/kafka_topic_partition_health_check.py
================================================
from __future__ import annotations
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from kafka import KafkaProducer, KafkaConsumer
from typing import Tuple
from pydantic import BaseModel
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No user-supplied inputs: the check runs purely against the connected cluster.
    pass
def kafka_topic_partition_health_check_printer(output):
    """Render the (status, issues) tuple from kafka_topic_partition_health_check."""
    healthy, issues = output
    if healthy:
        print("All topics and partitions are healthy!")
        return
    print("Issues detected with topics or partitions:\n")
    for issue in issues:
        print(f"Issue Type: {issue['issue_type']}")
        print(f"Description: {issue['description']}\n")
def kafka_topic_partition_health_check(handle) -> Tuple:
    """
    Checks the health of the Kafka topics and their partitions. This check verifies
    that every topic has at least one partition.

    :type handle: KafkaProducer
    :param handle: Handle containing the KafkaProducer instance; only its
        bootstrap_servers configuration is used here.

    :rtype: Tuple containing a status and an optional list of issues with topics
        and their partitions (None when everything is healthy).
    """
    issues = []
    # Using KafkaConsumer to get topic details
    consumer = KafkaConsumer(bootstrap_servers=handle.config['bootstrap_servers'])
    try:
        for topic in consumer.topics():
            # partitions_for_topic may return None when no partition metadata
            # exists; `not partitions` covers both None and the empty set
            # (the original's extra len()==0 check was redundant).
            if not consumer.partitions_for_topic(topic):
                issues.append({
                    'issue_type': f'Topic: {topic}',
                    'description': 'No partitions available.'
                })
    finally:
        # BUG FIX: close the consumer even if topic discovery raises mid-loop;
        # the original leaked the connection on that path.
        consumer.close()
    if issues:
        return (False, issues)
    return (True, None)
================================================
FILE: Keycloak/__init__.py
================================================
================================================
FILE: Keycloak/legos/keycloak_get_audit_report/README.md
================================================
# Get Keycloak audit report
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Keycloak/legos/keycloak_get_audit_report/__init__.py
================================================
================================================
FILE: Keycloak/legos/keycloak_get_audit_report/keycloak_get_audit_report.json
================================================
{
"action_title": "Get Keycloak audit report",
"action_description": "Fetches the audit events from Keycloak.",
"action_type": "LEGO_TYPE_KEYCLOAK",
"action_entry_function": "keycloak_get_audit_report",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":["CATEGORY_TYPE_INFORMATION","CATEGORY_TYPE_SRE","CATEGORY_TYPE_KEYCLOAK"]
}
================================================
FILE: Keycloak/legos/keycloak_get_audit_report/keycloak_get_audit_report.py
================================================
#
# Copyright (c) 2024 unSkript.com
# All rights reserved.
#
import requests
import os
from typing import List
from tabulate import tabulate
from pydantic import BaseModel
from datetime import datetime
class InputSchema(BaseModel):
    # No user-supplied inputs: the audit query targets the realm configured
    # on the credential handle.
    pass
def keycloak_get_audit_report_printer(output):
    """
    Print the Keycloak audit events as a grid table.

    Handles both login-style events (type/userId/clientId/ipAddress at the top
    level) and admin events (operationType plus an authDetails sub-object).
    """
    if not output:
        print("No audit events found.")
        return
    # Extract relevant event data for tabulation
    table_data = [["Time", "Type", "User ID", "Client ID", "IP Address", "Error"]]
    for event in output:
        # BUG FIX: the original defaulted a missing 'time' to '' which made
        # fromtimestamp(time/1000) below raise TypeError; default to 0 instead.
        event_ts = event.get('time', 0) or 0
        auth_details = event.get('authDetails') or {}
        _type = event.get('type') or event.get('operationType') or ''
        user_id = event.get('userId') or auth_details.get('userId', '')
        client_id = event.get('clientId') or auth_details.get('clientId', '')
        ip_addr = event.get('ipAddress') or auth_details.get('ipAddress', '')
        error = event.get('error', '')
        # Keycloak reports event times in milliseconds since the epoch.
        table_data.append([datetime.fromtimestamp(event_ts/1000).strftime('%Y-%m-%d %H:%M:%S'),
                           _type,
                           user_id,
                           client_id,
                           ip_addr,
                           error])
    print(tabulate(table_data, headers='firstrow', tablefmt="grid"))
def keycloak_get_audit_report(handle):
    """
    keycloak_get_audit_report fetches the audit events from Keycloak.

    :type handle: KeycloakAdmin
    :param handle: Handle containing the KeycloakAdmin instance.

    :rtype: List of dictionaries representing the audit events
        (empty list when there are none or on error).
    """
    try:
        # Exception could occur if keycloak package was not found
        # in such case try if we can import UnskriptKeycloakWrapper
        from unskript.connectors.keycloak import UnskriptKeycloakWrapper
        from unskript.legos.utils import get_keycloak_token
        if not isinstance(handle, UnskriptKeycloakWrapper):
            raise ValueError("Unable to Find Keycloak Package!")
        access_token = get_keycloak_token(handle)
        events_url = os.path.join(handle.server_url, f"admin/realms/{handle.realm_name}/events")
        headers = {"Authorization": f"Bearer {access_token}"}
        # Time-bound the request so an unresponsive Keycloak server cannot
        # stall the action indefinitely.
        response = requests.get(events_url, headers=headers, timeout=30)
        response.raise_for_status()
        events = response.json()
        return events if events else []
    except Exception as e:
        print(f"ERROR: Unable to connect to keycloak server {str(e)}")
        # BUG FIX: the original fell off the end and returned None here,
        # breaking callers that iterate over the documented list result.
        return []
================================================
FILE: Keycloak/legos/keycloak_get_handle/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Keycloak/legos/keycloak_get_service_health/__init__.py
================================================
================================================
FILE: Keycloak/legos/keycloak_get_service_health/keycloak_get_service_health.json
================================================
{
"action_title": "Get Keycloak service health",
"action_description": "Fetches the health of the Keycloak service by trying to list available realms.",
"action_type": "LEGO_TYPE_KEYCLOAK",
"action_entry_function": "keycloak_get_service_health",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":["CATEGORY_TYPE_SRE","CATEGORY_TYPE_KEYCLOAK"]
}
================================================
FILE: Keycloak/legos/keycloak_get_service_health/keycloak_get_service_health.py
================================================
#
# Copyright (c) 2024 unSkript.com
# All rights reserved.
#
import requests
import os
from typing import Tuple
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No user-supplied inputs: the health check uses only the credential handle.
    pass
def keycloak_get_service_health(handle):
    """
    keycloak_get_service_health fetches the health of the Keycloak service by
    listing the realms available on the server and checking that the handle's
    configured realm is among them.

    :type handle: object
    :param handle: Handle containing the KeycloakAdmin instance
        (an UnskriptKeycloakWrapper exposing ``server_url`` and ``realm_name``).

    :rtype: Tuple of (is_healthy, realms): (True, None) when the configured
        realm is present on the server; (False, <realms reported by the
        server>) when it is not; (False, None) when the server could not be
        contacted at all.
    """
    try:
        # Imported lazily so this module can be imported without the unSkript
        # runtime installed.
        from unskript.connectors.keycloak import UnskriptKeycloakWrapper
        from unskript.legos.utils import get_keycloak_token

        if not isinstance(handle, UnskriptKeycloakWrapper):
            raise ValueError("Unable to Find Keycloak Package!")

        access_token = get_keycloak_token(handle)
        # Build the URL with string handling, not os.path.join: os.path.join
        # is for filesystem paths and yields backslashes on Windows.
        realms_url = handle.server_url.rstrip("/") + "/admin/realms"
        headers = {"Authorization": f"Bearer {access_token}"}
        # A bounded timeout keeps a hung Keycloak server from blocking the
        # action forever (requests waits indefinitely by default).
        response = requests.get(realms_url, headers=headers, timeout=30)
        response.raise_for_status()
        available_realms = response.json()

        # Healthy only when the configured realm appears in the server's list.
        result = False
        if handle.realm_name and available_realms:
            result = any(realm.get("realm") == handle.realm_name for realm in available_realms)
        if not result:
            return (False, available_realms)
        return (True, None)
    except Exception as e:
        print(f"ERROR: Unable to connect to keycloak server {str(e)}")
        # Return a well-formed tuple instead of an implicit None so the
        # companion printer, which unpacks the result, does not crash.
        return (False, None)
def keycloak_get_service_health_printer(output):
    """Print a human-readable summary of the Keycloak health-check result.

    :type output: tuple or None
    :param output: (is_healthy, realms) tuple returned by the health check,
        or None when the check aborted with an exception.
    """
    # The health check prints an error and returns None on exception; guard
    # here so tuple unpacking does not raise TypeError in that case. This
    # matches the `if output is None: return` convention used by the other
    # printers in this file.
    if output is None:
        return
    is_healthy, realms = output
    if is_healthy:
        print("Keycloak Service is Healthy.")
    else:
        print("Keycloak Service is Unhealthy.")
    # `realms` is only populated (with the server's realm list) when the
    # configured realm was not found among them.
    if realms:
        print("\nUnavailable Realms:")
        for realm in realms:
            print(f" - {realm}")
================================================
FILE: Kubernetes/Delete_Evicted_Pods_From_Namespaces.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"id": "ed972c43-e797-4fe7-8e90-386d7af0b950",
"metadata": {
"jupyter": {
"source_hidden": false
},
"name": "Steps Overview",
"orderProperties": [],
"tags": [],
"title": "Steps Overview"
},
"source": [
"1) Get system config map
2) Post slack message
This custom action changes the type of namespace and config_map_name from None to String if no namespace is given
" ] }, { "cell_type": "code", "execution_count": 3, "id": "a10ef57b-c2d3-4b48-b7e5-de180785881a", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-12T07:17:30.635Z" }, "jupyter": { "source_hidden": true }, "name": "Convert namespace to String if empty", "orderProperties": [], "tags": [], "title": "Convert namespace to String if empty", "trusted": true }, "outputs": [], "source": [ "if namespace == None:\n", " namespace = ''" ] }, { "cell_type": "markdown", "id": "521a1162-8c06-45eb-b938-32c7cc52be38", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 1", "orderProperties": [], "tags": [], "title": "Step 1" }, "source": [ "This action gets the ConfigMap object for a given namespace or config map name. If neither is specified, namespace is considered to be \"all\".
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 4, "id": "f1140b24-7721-4808-a8da-12e59bd34b27", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "0c59f81ba7694bc31e1a0e856340ce9545d4d4a3562d2c61659500950751b16a", "collapsed": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Get k8s kube system config map", "execution_data": { "last_date_success_run_cell": "2023-02-12T07:18:20.599Z" }, "id": 56, "index": 56, "inputData": [ { "config_map_name": { "constant": false, "value": "" }, "namespace": { "constant": false, "value": " namespace" } } ], "inputschema": [ { "properties": { "config_map_name": { "description": "Kubernetes Config Map Name", "title": "Config Map", "type": "string" }, "namespace": { "description": "Kubernetes namespace", "title": "Namespace", "type": "string" } }, "title": "k8s_get_config_map_kube_system", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Get k8s kube system config map", "nouns": [], "orderProperties": [ "namespace", "config_map_name" ], "output": { "type": "" }, "outputParams": { "output_name": "config_map_details", "output_name_enabled": true }, "printOutput": true, "probeEnabled": false, "tags": [ "k8s_get_config_map_kube_system" ], "trusted": true, "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from typing import Optional, List, Tuple\n", "from kubernetes import client\n", "from pydantic import BaseModel, Field\n", "from tabulate import tabulate\n", "from 
unskript.legos.kubernetes.k8s_kubectl_command.k8s_kubectl_command import k8s_kubectl_command\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_get_config_map_kube_system_printer(output):\n", " if output is None:\n", " return\n", " for x in output:\n", " for k,v in x.items():\n", " if k=='details':\n", " for config in v:\n", " data_set_1 = []\n", " data_set_1.append(\"Name:\")\n", " data_set_1.append(config.metadata.name)\n", "\n", " data_set_2 = []\n", " data_set_2.append(\"Namespace:\")\n", " data_set_2.append(config.metadata.namespace)\n", "\n", " data_set_3 = []\n", " data_set_3.append(\"Labels:\")\n", " data_set_3.append(config.metadata.labels)\n", "\n", " data_set_4 = []\n", " data_set_4.append(\"Annotations:\")\n", " data_set_4.append(config.metadata.annotations)\n", "\n", " data_set_5 = []\n", " data_set_5.append(\"Data:\")\n", " data_set_5.append(config.data)\n", "\n", " tabular_config_map = []\n", " tabular_config_map.append(data_set_1)\n", " tabular_config_map.append(data_set_2)\n", " tabular_config_map.append(data_set_3)\n", " tabular_config_map.append(data_set_4)\n", " tabular_config_map.append(data_set_5)\n", "\n", " print(tabulate(tabular_config_map, tablefmt=\"github\"))\n", "\n", "\n", "@beartype\n", "def k8s_get_config_map_kube_system(handle, config_map_name: str = '', namespace: str = None)->List:\n", " \"\"\"k8s_get_config_map_kube_system get kube system config map\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type config_map_name: str\n", " :param config_map_name: Kubernetes Config Map Name.\n", "\n", " :type namespace: str\n", " :param namespace: Kubernetes namespace.\n", "\n", " :rtype: List of system kube config maps for a given namespace\n", " \"\"\"\n", " all_namespaces = [namespace]\n", " cmd = f\"kubectl get ns --no-headers -o custom-columns=':metadata.name'\"\n", " if namespace is None or len(namespace)==0:\n", " kubernetes_namespaces = 
k8s_kubectl_command(handle=handle,kubectl_command=cmd )\n", " replaced_str = kubernetes_namespaces.replace(\"\\n\",\" \")\n", " stripped_str = replaced_str.strip()\n", " all_namespaces = stripped_str.split(\" \")\n", " result = []\n", " coreApiClient = client.CoreV1Api(api_client=handle)\n", " for n in all_namespaces:\n", " config_map_dict = {}\n", " res = coreApiClient.list_namespaced_config_map(\n", " namespace=n, pretty=True)\n", " if len(res.items) > 0:\n", " if config_map_name:\n", " config_maps = list(\n", " filter(lambda x: (x.metadata.name == config_map_name), res.items))\n", " else:\n", " config_maps = res.items\n", " config_map_dict[\"namespace\"] = n\n", " config_map_dict[\"details\"] = config_maps\n", " result.append(config_map_dict)\n", " return result\n", "\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"namespace\": \" namespace\"\n", " }''')\n", "task.configure(outputName=\"config_map_details\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_get_config_map_kube_system, lego_printer=k8s_get_config_map_kube_system_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "0a404d88-fa37-4716-86cd-fa6d82c298d2", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "Action takes the following parameters (Optional) :
\n", "namespace, config_map_name
Action gives the following output (Optional) :config_map_details
This action posts a slack message of the config map retrieved in Step 1. This action will only run if the channel_name is specified in the parameters.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "d616410e-1051-463c-a1ae-4b7d1162e823", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "6a87f83ab0ecfeecb9c98d084e2b1066c26fa64be5b4928d5573a5d60299802d", "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Post Slack Message", "id": 78, "index": 78, "inputData": [ { "channel": { "constant": false, "value": "channel_name" }, "message": { "constant": false, "value": "f\"Config map for namespace:{namespace}: {config_map_details}\"" } } ], "inputschema": [ { "properties": { "channel": { "description": "Name of slack channel.", "title": "Channel", "type": "string" }, "message": { "description": "Message for slack channel.", "title": "Message", "type": "string" } }, "required": [ "channel", "message" ], "title": "slack_post_message", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SLACK", "name": "Post Slack Message", "nouns": [], "orderProperties": [ "channel", "message" ], "output": { "type": "" }, "printOutput": true, "startcondition": "if len(channel_name)!=0", "tags": [ "slack_post_message" ], "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "\n", "from pydantic import BaseModel, Field\n", "from slack_sdk import WebClient\n", "from slack_sdk.errors import SlackApiError\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "from beartype import beartype\n", "\n", "from beartype import beartype\n", "@beartype\n", "def slack_post_message_printer(output):\n", " if output is not None:\n", " pprint.pprint(output)\n", " else:\n", " return\n", "\n", "\n", 
"@beartype\n", "@beartype\n", "def slack_post_message(\n", " handle: WebClient,\n", " channel: str,\n", " message: str) -> str:\n", "\n", " try:\n", " response = handle.chat_postMessage(\n", " channel=channel,\n", " text=message)\n", " return f\"Successfuly Sent Message on Channel: #{channel}\"\n", " except SlackApiError as e:\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.response['error']}\")\n", " if e.response['error'] == 'channel_not_found':\n", " raise Exception('Channel Not Found')\n", " elif e.response['error'] == 'duplicate_channel_not_found':\n", " raise Exception('Channel associated with the message_id not valid')\n", " elif e.response['error'] == 'not_in_channel':\n", " raise Exception('Cannot post message to channel user is not in')\n", " elif e.response['error'] == 'is_archived':\n", " raise Exception('Channel has been archived')\n", " elif e.response['error'] == 'msg_too_long':\n", " raise Exception('Message text is too long')\n", " elif e.response['error'] == 'no_text':\n", " raise Exception('Message text was not provided')\n", " elif e.response['error'] == 'restricted_action':\n", " raise Exception('Workspace preference prevents user from posting')\n", " elif e.response['error'] == 'restricted_action_read_only_channel':\n", " raise Exception('Cannot Post message, read-only channel')\n", " elif e.response['error'] == 'team_access_not_granted':\n", " raise Exception('The token used is not granted access to the workspace')\n", " elif e.response['error'] == 'not_authed':\n", " raise Exception('No Authtnecition token provided')\n", " elif e.response['error'] == 'invalid_auth':\n", " raise Exception('Some aspect of Authentication cannot be validated. 
Request denied')\n", " elif e.response['error'] == 'access_denied':\n", " raise Exception('Access to a resource specified in the request denied')\n", " elif e.response['error'] == 'account_inactive':\n", " raise Exception('Authentication token is for a deleted user')\n", " elif e.response['error'] == 'token_revoked':\n", " raise Exception('Authentication token for a deleted user has been revoked')\n", " elif e.response['error'] == 'no_permission':\n", " raise Exception('The workspace toekn used does not have necessary permission to send message')\n", " elif e.response['error'] == 'ratelimited':\n", " raise Exception('The request has been ratelimited. Retry sending message later')\n", " elif e.response['error'] == 'service_unavailable':\n", " raise Exception('The service is temporarily unavailable')\n", " elif e.response['error'] == 'fatal_error':\n", " raise Exception('The server encountered catostrophic error while sending message')\n", " elif e.response['error'] == 'internal_error':\n", " raise Exception('The server could not complete operation, likely due to transietn issue')\n", " elif e.response['error'] == 'request_timeout':\n", " raise Exception('Sending message error via POST: either message was missing or truncated')\n", " else:\n", " raise Exception(f'Failed Sending Message to slack channel {channel} Error: {e.response[\"error\"]}')\n", "\n", " except Exception as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.__str__()}\")\n", " return f\"Unable to send message on {channel}\"\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"channel\": \"channel_name\",\n", " \"message\": \"f\\\\\"Config map for namespace:{namespace}: {config_map_details}\\\\\"\"\n", " }''')\n", "\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"if len(channel_name)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", 
"task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(slack_post_message, lego_printer=slack_post_message_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "b92d99e6-2735-4d4c-b20e-358ee36e6243", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "Action takes the following parameters (Optional) :
\n", "channel_name, message
In this Runbook, we were able to get the Kube ConfigMap and post a Slack message with the ConfigMap details. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "parameters": [ "channel_name", "namespace" ], "runbook_name": "k8s: Get kube system config map" }, "kernelspec": { "display_name": "Python 3.9.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "channel_name": { "description": "Slack channel to post the details to. Eg: \"general\"", "title": "channel_name", "type": "string" }, "namespace": { "description": "Name of the namespace to fetch system config map. If left empty, it will fetch for all. Eg: \"logging\"", "title": "namespace", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": null, "vscode": { "interpreter": { "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Kubernetes/Get_Kube_System_Config_Map.json ================================================ { "name": "k8s: Get kube system config map", "description": "This runbook fetches the kube system config map for a k8s cluster and publishes the information on a Slack channel.", "uuid": "3fd89891a2b968e4422632e121c72ece82ef51b09822df7fcf734e9a14ed9e5c", "icon": "CONNECTOR_TYPE_K8S", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_K8S" ], "version": "1.0.0" } ================================================ FILE: Kubernetes/K8S_Delete_Pods_From_Failing_Jobs.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "ed972c43-e797-4fe7-8e90-386d7af0b950", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Get failing pods from all jobs.
2) Delete the pod
If a job doesn’t exit cleanly (whether it finished successfully or not) the pod is left in a terminated or errored state. After some rounds of runs, these extra pods can quickly exhaust iptables’ available IP addresses in the cluster. This action fetches all the pods that are not in the running state from a scheduled job.
\n", "\n", "\n", "Input parameters:
\n", "namespace (Optional)
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "4450bc10-5a2b-4985-8d33-92d19c2f1acf", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_TROUBLESHOOTING", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_K8S", "CATEGORY_TYPE_K8S_POD" ], "actionDescription": "Get Kubernetes Error PODs from All Jobs", "actionEntryFunction": "k8s_get_error_pods_from_all_jobs", "actionIsCheck": true, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_LIST", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Get Kubernetes Error PODs from All Jobs", "actionType": "LEGO_TYPE_K8S", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "d7a1da167d056a912739fce8c4571c6863050f52d6e19495971277057e709857", "collapsed": true, "condition_enabled": true, "continueOnError": false, "credentialsJson": {}, "description": "Get Kubernetes Error PODs from All Jobs", "id": 2, "index": 2, "inputData": [ { "namespace": { "constant": false, "value": "namespace" } } ], "inputschema": [ { "properties": { "namespace": { "default": "", "description": "k8s Namespace", "title": "Namespace", "type": "string" } }, "required": [], "title": "k8s_get_error_pods_from_all_jobs", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_K8S", "name": "Get Kubernetes Error PODs from All Jobs", "orderProperties": [ "namespace" ], "outputParams": { "output_name": "unhealthy_pods", "output_name_enabled": true, "output_runbook_enabled": false, "output_runbook_name": "" }, "printOutput": true, "startcondition": "not pod_names", "tags": [ "k8s_get_error_pods_from_all_jobs" ], "title": "Get Kubernetes Error PODs from All Jobs", "uuid": 
"d7a1da167d056a912739fce8c4571c6863050f52d6e19495971277057e709857", "version": "1.0.0" }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2023 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "import pprint\n", "from typing import Tuple, Optional\n", "from pydantic import BaseModel, Field\n", "from kubernetes.client.rest import ApiException\n", "from kubernetes import client, watch\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_get_error_pods_from_all_jobs_printer(output):\n", " if output is None:\n", " return\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def k8s_get_error_pods_from_all_jobs(handle, namespace:str=\"\") -> Tuple:\n", " \"\"\"k8s_get_error_pods_from_all_jobs This check function uses the handle's native command\n", " method to execute a pre-defined kubectl command and returns the output of list of error pods\n", " from all jobs.\n", "\n", " :type handle: Object\n", " :param handle: Object returned from the task.validate(...) 
function\n", "\n", " :rtype: Tuple Result in tuple format.\n", " \"\"\"\n", " result = []\n", " coreApiClient = client.CoreV1Api(api_client=handle)\n", " BatchApiClient = client.BatchV1Api(api_client=handle)\n", " # If namespace is provided, get jobs from the specified namespace\n", " if namespace:\n", " jobs = BatchApiClient.list_namespaced_job(namespace,watch=False, limit=200).items\n", " # If namespace is not provided, get jobs from all namespaces\n", " else:\n", " jobs = BatchApiClient.list_job_for_all_namespaces(watch=False, limit=200).items\n", "\n", " for job in jobs:\n", " # Fetching all the pods associated with the current job\n", " pods = coreApiClient.list_namespaced_pod(job.metadata.namespace, label_selector=f\"job-name={job.metadata.name}\",watch=False, limit=200).items\n", "\n", " # Checking the status of each pod\n", " for pod in pods:\n", " # If the pod status is 'Failed', print its namespace and name\n", " if pod.status.phase != \"Succeeded\":\n", " result.append({\"namespace\":pod.metadata.namespace,\"pod_name\":pod.metadata.name})\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"namespace\": \"namespace\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"not pod_names\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"unhealthy_pods\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_get_error_pods_from_all_jobs, lego_printer=k8s_get_error_pods_from_all_jobs_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "9dbff6fe-9b36-4749-8cc5-000b70b7e87d", "metadata": { "name": "Step 1A", "orderProperties": [], "tags": [], "title": "Step 1A" }, "source": [ "Output variable:
\n", "unhealthy_pods
This action gets a list of all objects from the output of Step 1
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "af12bab5-e503-4da9-a74d-dbb88c5f8298", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create list of errored pods", "orderProperties": [], "tags": [], "title": "Create list of errored pods" }, "outputs": [], "source": [ "all_unhealthy_pods = []\n", "try:\n", " if unhealthy_pods[0] == False:\n", " if len(unhealthy_pods[1])!=0:\n", " all_unhealthy_pods=unhealthy_pods[1]\n", "except Exception:\n", " for po in pod_names:\n", " data_dict = {}\n", " data_dict[\"namespace\"] = namespace\n", " data_dict[\"pod_name\"] = po\n", " all_unhealthy_pods.append(data_dict)\n", "print(all_unhealthy_pods)" ] }, { "cell_type": "markdown", "id": "8310152a-97fd-4920-afce-f70dbdf28991", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "This action captures the following output:
\n", "all_uhealthy_pods
This action deletes the pods found in Step 1.
\n", "\n", "\n", "Input parameters:
\n", "pod_name, namespace
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "d0170b6b-6d69-4dd8-9c8f-9128e252659f", "metadata": { "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_K8S", "CATEGORY_TYPE_K8S_POD" ], "actionDescription": "Delete a Kubernetes POD in a given Namespace", "actionEntryFunction": "k8s_delete_pod", "actionIsCheck": false, "actionIsRemediation": false, "actionNeedsCredential": true, "actionNextHop": null, "actionNextHopParameterMapping": null, "actionNouns": null, "actionOutputType": "ACTION_OUTPUT_TYPE_DICT", "actionSupportsIteration": true, "actionSupportsPoll": true, "actionTitle": "Delete a Kubernetes POD in a given Namespace", "actionType": "LEGO_TYPE_K8S", "actionVerbs": null, "actionVersion": "1.0.0", "action_modified": false, "action_uuid": "9e1cc8076571d227dc6d1955fda400e9e29e2306b070d007b72692cfa2281407", "condition_enabled": true, "continueOnError": true, "credentialsJson": {}, "description": "Delete a Kubernetes POD in a given Namespace", "id": 2, "index": 2, "inputData": [ { "namespace": { "constant": false, "value": "\"iter.get(\\\\\"namespace\\\\\")\"" }, "podname": { "constant": false, "value": "\"iter.get(\\\\\"pod_name\\\\\")\"" } } ], "inputschema": [ { "properties": { "namespace": { "description": "Kubernetes namespace", "title": "Namespace", "type": "string" }, "podname": { "description": "K8S Pod Name", "title": "Podname", "type": "string" } }, "required": [ "namespace", "podname" ], "title": "k8s_delete_pod", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": { "namespace": "namespace", "podname": "pod_name" }, "iter_list": { "constant": false, "objectItems": true, "value": "all_unhealthy_pods" } } ], "jupyter": { "source_hidden": true }, "language": "python", "legotype": "LEGO_TYPE_K8S", "name": "Delete a Kubernetes POD in a given Namespace", "orderProperties": [ "namespace", "podname" ], "printOutput": true, "startcondition": 
"len(all_unhealthy_pods)!=0", "tags": [ "k8s_delete_pod" ], "uuid": "9e1cc8076571d227dc6d1955fda400e9e29e2306b070d007b72692cfa2281407", "version": "1.0.0" }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2021 unSkript.com\n", "# All rights reserved.\n", "#\n", "import pprint\n", "from typing import Dict\n", "from pydantic import BaseModel, Field\n", "from kubernetes import client\n", "from kubernetes.client.rest import ApiException\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_delete_pod_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def k8s_delete_pod(handle, namespace: str, podname: str):\n", " \"\"\"k8s_delete_pod delete a Kubernetes POD in a given Namespace\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type namespace: str\n", " :param namespace: Kubernetes namespace\n", "\n", " :type podname: str\n", " :param podname: K8S Pod Name\n", "\n", " :rtype: Dict of POD info\n", " \"\"\"\n", " coreApiClient = client.CoreV1Api(api_client=handle)\n", "\n", " try:\n", " resp = coreApiClient.delete_namespaced_pod(\n", " name=podname, namespace=namespace)\n", " except ApiException as e:\n", " resp = 'An Exception occurred while executing the command ' + e.reason\n", " raise e\n", " return resp\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"podname\": \"iter.get(\\\\\"pod_name\\\\\")\",\n", " \"namespace\": \"iter.get(\\\\\"namespace\\\\\")\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_unhealthy_pods\",\n", " \"iter_parameter\": [\"podname\",\"namespace\"]\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_unhealthy_pods)!=0\",\n", " \"condition_result\": 
true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_delete_pod, lego_printer=k8s_delete_pod_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "3b09724f-c9f8-4399-a3d1-aaf8d4866911", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "Output paramerters: None
\n", "
This runbook addressed the issue of failing Kubernetes pods in jobs that were leading to IP exhaustion. By following the steps outlined in this runbook, the failing pods were identified and deleted, preventing further IP exhaustion. Regular monitoring and proactive deletion of failing pods from jobs are crucial to maintaining the stability and availability of the Kubernetes cluster. Implementing this runbook as part of the operational processes will help ensure efficient resource utilization and minimize disruptions caused by IP exhaustion. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "IP Exhaustion Mitigation: Failing K8s Pod Deletion from Jobs", "parameters": [ "namespace" ] }, "kernelspec": { "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "namespace": { "description": "Name of the K8s namespace. Default- all namespaces", "title": "namespace", "type": "string" }, "pod_names": { "description": "Pod names from a particular namespace to delete for failing jobs.", "title": "pod_names", "type": "array" } }, "required": [], "title": "Schema", "type": "object" }, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Kubernetes/K8S_Delete_Pods_From_Failing_Jobs.json ================================================ { "name": "IP Exhaustion Mitigation: Failing K8s Pod Deletion from Jobs", "description": "Preventing IP exhaustion is critical in Kubernetes environments, and a key strategy is deleting failing pods from jobs. Failing pods can consume valuable IP resources, leading to scarcity and inefficiency. By proactively identifying and removing malfunctioning pods, administrators can promptly free up IP addresses, optimizing resource utilization. This approach ensures that IP allocation remains efficient, enabling the cluster to accommodate new pods without experiencing IP exhaustion. 
This runbook helps us to identify failing pods within jobs thereby maximizing IP availability for other pods and services.", "uuid": "88e97c46ad944d2f0541cd1f87e3ec5b8a4619f6093e89b55cec53b2a47e45aa", "icon": "CONNECTOR_TYPE_K8S", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_K8S" ], "version": "1.0.0" } ================================================ FILE: Kubernetes/K8S_Deployment_with_multiple_restarts.ipynb ================================================ { "nbformat": 4, "nbformat_minor": 5, "metadata": { "kernelspec": { "name": "python_kubernetes", "display_name": "unSkript (Build: 1267)" }, "language_info": { "name": "python", "file_extension": ".py", "mimetype": "text/x-python", "pygments_lexer": "ipython3" }, "execution_data": { "runbook_name": "k8s: Deployment with multiple restarts", "parameters": [ "namespace", "pod", "app_label", "container", "deployment" ] }, "parameterSchema": { "definitions": null, "properties": { "app_label": { "description": "k8s App Label", "title": "app_label", "type": "string" }, "container": { "description": "k8s container", "title": "container", "type": "string" }, "deployment": { "description": "Name of deployment with the restart issue", "title": "deployment", "type": "string" }, "namespace": { "description": "k8s namespace with problematic deployment", "title": "namespace", "type": "string" }, "pod": { "description": "k8s pod name", "title": "pod", "type": "string" } }, "required": [ "namespace", "app_label", "pod", "deployment" ], "title": "Schema", "type": "object" }, "outputParameterSchema": { "definitions": null, "properties": {}, "required": [], "title": "Schema", "type": "object" }, "parameterValues": {} }, "cells": [ { "id": "cef235be-afe2-45d3-b2a5-291cbb45698a", "cell_type": "markdown", "metadata": { "jupyter": { "source_hidden": false }, "name": "Debug Steps", "orderProperties": [], "tags": [], "title": "Debug Steps" }, "source": 
"Increase resource allocation:
\nIf the root cause is identified as insufficient resources, increase the allocation of CPU, memory, or other resources to the Kubernetes deployment.
\n", "execution_count": null, "outputs": [] }, { "id": "7c6f855a-ac70-41d0-bf70-2936a3e00ce3", "cell_type": "code", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_K8S", "CATEGORY_TYPE_K8S_KUBECTL" ], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": true, "action_uuid": "8cd969c4db1d03d54d258e2c119e90aa914888abb4d5376b775ade8233bf3ae7", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Execute kubectl command.", "execution_data": {}, "id": 76, "index": 76, "inputData": [ { "deployment": { "constant": false, "value": "deployment" }, "kubectl_command": { "constant": false, "value": "\"kubectl set resources deployment {deployment} -n {namespace} --limits=cpu={new_cpu_limit},memory={new_memory_limit}\"" }, "namespace": { "constant": false, "value": "namespace" }, "new_cpu_limit": { "constant": false, "value": "12" }, "new_memory_limit": { "constant": false, "value": "12" } } ], "inputschema": [ { "properties": { "deployment": { "default": "", "description": "Deployment Name", "title": "deployment", "type": "string" }, "kubectl_command": { "default": "", "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" }, "namespace": { "default": "", "description": "Namespace", "title": "namespace", "type": "string" }, "new_cpu_limit": { "default": "", "description": "New CPU Limit", "title": "new_cpu_limit", "type": "string" }, "new_memory_limit": { "default": "", "description": "New Memory Limit", "title": "new_memory_limit", "type": "string" } }, "required": [ "kubectl_command", "new_cpu_limit", 
"new_memory_limit", "deployment", "namespace" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Kubectl command -> Increase CPU/Memory Limits", "nouns": [], "orderProperties": [ "kubectl_command", "new_cpu_limit", "new_memory_limit", "deployment", "namespace" ], "output": { "type": "" }, "printOutput": true, "tags": [ "k8s_kubectl_command" ], "title": "Kubectl command -> Increase CPU/Memory Limits", "verbs": [], "execution_count": {} }, "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "from kubernetes.client.rest import ApiException\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def k8s_kubectl_command(handle, new_cpu_limit, new_memory_limit, deployment, namespace, kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String\n", " in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation is not True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command.format(deployment=deployment, namespace=namespace, new_cpu_limit=new_cpu_limit, new_memory_limit=new_memory_limit))\n", "\n", " if result is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}) (empty response)\")\n", " return \"\"\n", "\n", " if result.stderr:\n", " raise ApiException(f\"Error occurred while 
executing command {kubectl_command} {result.stderr}\")\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"\\\\\"kubectl set resources deployment {deployment} -n {namespace} --limits=cpu={new_cpu_limit},memory={new_memory_limit}\\\\\"\",\n", " \"new_cpu_limit\": \"12\",\n", " \"new_memory_limit\": \"12\",\n", " \"deployment\": \"deployment\",\n", " \"namespace\": \"namespace\"\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)\n" ], "execution_count": 21, "outputs": [] }, { "id": "2c2aa264-5a2b-4714-adfd-79bdc4ae89d1", "cell_type": "code", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_K8S", "CATEGORY_TYPE_K8S_KUBECTL" ], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": true, "action_uuid": "8cd969c4db1d03d54d258e2c119e90aa914888abb4d5376b775ade8233bf3ae7", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Execute kubectl command.", "execution_data": {}, "id": 76, "index": 76, "inputData": [ { "deployment": { "constant": false, "value": "deployment" }, "kubectl_command": { "constant": false, "value": "\"kubectl scale deployment -n {namespace} {deployment} --replicas={replicas}\"" }, "namespace": { "constant": false, "value": "namespace" }, "replicas": { "constant": false, "value": "2" } } ], "inputschema": [ { "properties": { "deployment": { "default": "", "description": 
"Deployment", "title": "deployment", "type": "string" }, "kubectl_command": { "default": "", "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" }, "namespace": { "default": "", "description": "Namespace", "title": "namespace", "type": "string" }, "replicas": { "default": "", "description": "Replica Count", "title": "replicas", "type": "string" } }, "required": [ "deployment", "kubectl_command", "namespace", "replicas" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Kubectl command -> Scale Deployment", "nouns": [], "orderProperties": [ "kubectl_command", "replicas", "namespace", "deployment" ], "output": { "type": "" }, "printOutput": true, "tags": [ "k8s_kubectl_command" ], "title": "Kubectl command -> Scale Deployment", "verbs": [], "execution_count": {} }, "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "from kubernetes.client.rest import ApiException\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def k8s_kubectl_command(handle, kubectl_command: str, namespace, deployment, replicas) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String\n", " in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation is not True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " 
result = handle.run_native_cmd(kubectl_command.format(namespace=namespace, deployment=deployment, replicas=replicas))\n", "\n", " if result is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}) (empty response)\")\n", " return \"\"\n", "\n", " if result.stderr:\n", " raise ApiException(f\"Error occurred while executing command {kubectl_command} {result.stderr}\")\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"deployment\": \"deployment\",\n", " \"kubectl_command\": \"\\\\\"kubectl scale deployment -n {namespace} {deployment} --replicas={replicas}\\\\\"\",\n", " \"namespace\": \"namespace\",\n", " \"replicas\": \"2\"\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)\n" ], "execution_count": 46, "outputs": [] } ] } ================================================ FILE: Kubernetes/K8S_Deployment_with_multiple_restarts.json ================================================ { "name": "k8s: Deployment with multiple restarts", "description": "Kubernetes deployment has experienced multiple restarts within a certain timeframe, which is usually indicative of a problem. 
When a deployment experiences multiple restarts, it can impact the availability and performance of the application, and can be a sign of underlying issues that need to be addressed.", "uuid": "b138b716b87b7707424b3558b3b007a17d310d73c2fe9308f8702859e8c6a3a7", "icon": "CONNECTOR_TYPE_K8S", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_K8S" ], "version": "1.0.0" } ================================================ FILE: Kubernetes/K8S_Get_Candidate_Nodes_Given_Config.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "5f2fac7e", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "
1. Get the matching nodes for a given configuration\n",
"
Here we will use unSkript's "Get candidate k8s nodes for the given configuration" action. This action finds the nodes that match a given configuration.
\n", "\n", "\n", "Input parameters:
\n", "cpu_limit, memory_limit, pod_limit
\n", "\n", "Output variable:
\n", "candidate_nodes
" ] }, { "cell_type": "code", "execution_count": 43, "id": "faff16f3-a562-4d4e-804c-c509efee3cec", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "5326cf5d52f4d62391e32a4290dcca4ac6f023218b01aefcc5be2765391e7ea2", "collapsed": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Get candidate k8s nodes for given configuration", "execution_data": { "last_date_success_run_cell": "2023-02-13T10:59:51.802Z" }, "id": 34, "index": 34, "inputData": [ { "cpu_limit": { "constant": false, "value": "int(cpu_limit)" }, "memory_limit": { "constant": false, "value": "memory_limit" }, "pod_limit": { "constant": false, "value": "int(pod_limit)" } } ], "inputschema": [ { "properties": { "cpu_limit": { "default": 0, "description": "CPU Limit. Eg 2", "title": "CPU Limit", "type": "integer" }, "memory_limit": { "default": "", "description": "Limits and requests for memory are measured in bytes. Accept the store in Mi. Eg 123Mi", "title": "Memory Limit (Mi)", "type": "string" }, "pod_limit": { "default": 0, "description": "Pod Limit. 
Eg 2", "title": "Number of Pods to attach", "type": "integer" } }, "title": "k8s_get_candidate_nodes_for_pods", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Get candidate k8s nodes for given configuration", "nouns": [ "candidate", "nodes", "configuration" ], "orderProperties": [ "cpu_limit", "memory_limit", "pod_limit" ], "output": { "type": "" }, "outputParams": { "output_name": "candidate_nodes", "output_name_enabled": true }, "printOutput": true, "tags": [ "k8s_get_candidate_nodes_for_pods" ], "title": "Get candidate k8s nodes for given configuration", "trusted": true, "verbs": [ "get" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "from typing import Optional\n", "\n", "from kubernetes import client\n", "from pydantic import BaseModel, Field\n", "from tabulate import tabulate\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_get_candidate_nodes_for_pods(handle, cpu_limit: int = 0, memory_limit: str = \"\", pod_limit: int = 0):\n", "\n", " coreApiClient = client.CoreV1Api(api_client=handle)\n", "\n", " nodes = coreApiClient.list_node()\n", " match_nodes = [node for node in nodes.items if\n", " (cpu_limit < int(node.status.capacity.get(\"cpu\", 0))) and\n", " (pod_limit < int(node.status.capacity.get(\"pods\", 0))) and\n", " int(memory_limit.split(\"Mi\")[0]) < (int(node.status.capacity.get(\"memory\").split(\"Ki\")[0]) / 1024)]\n", "\n", " if len(match_nodes) > 0:\n", " data = []\n", "\n", " for node in match_nodes:\n", " node_capacity = []\n", " node_capacity.append(node.metadata.name)\n", " for capacity in node.status.capacity.values():\n", " node_capacity.append(capacity)\n", " data.append(node_capacity)\n", "\n", " print(\"\\n\")\n", " print(tabulate(data, tablefmt=\"grid\", headers=[\"Name\", \"cpu\", 
\"ephemeral-storage\",\n", " \"hugepages-1Gi\", \"hugepages-2Mi\", \"memory\", \"pods\"]))\n", " return match_nodes\n", "\n", " pp.pprint(\"No Matching Nodes Found for this spec\")\n", " return None\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(outputName=\"candidate_nodes\")\n", "task.configure(inputParamsJson='''{\n", " \"cpu_limit\": \"int(cpu_limit)\",\n", " \"memory_limit\": \"memory_limit\",\n", " \"pod_limit\": \"int(pod_limit)\"\n", " }''')\n", "task.configure(printOutput=True)\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.output = task.execute(k8s_get_candidate_nodes_for_pods, hdl=hdl, args=args)\n", " if task.output_name != None:\n", " globals().update({task.output_name: task.output[0]})\n", "\n", "if hasattr(task, 'output'):\n", " if isinstance(task.output, (list, tuple)):\n", " for item in task.output:\n", " print(f'item: {item}')\n", " elif isinstance(task.output, dict):\n", " for item in task.output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(task.output)\n", " w.tasks[task.name]= task.output" ] }, { "cell_type": "markdown", "id": "2a154136", "metadata": { "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "
In this Runbook, we demonstrated the use of unSkript's k8s legos to run k8s configuration and get the matching nodes for a given configuration (storage, CPU, memory, pod_limit). To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "k8s: Get candidate nodes for given configuration", "parameters": [ "cpu_limit", "ebs_limit", "memory_limit", "pod_limit" ] }, "kernelspec": { "display_name": "unSkript (Build: 839)", "name": "python_kubernetes" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3" }, "parameterSchema": { "properties": { "cpu_limit": { "default": 1, "description": "CPU Limit. Eg 2", "title": "cpu_limit", "type": "number" }, "ebs_limit": { "default": 1, "description": "EBS Volume Limit in Gb. Eg 25", "title": "ebs_limit", "type": "number" }, "memory_limit": { "default": "65Mi", "description": "Memory limits and requests are measured in bytes. Eg 64Mi", "title": "memory_limit", "type": "string" }, "pod_limit": { "default": 1, "description": "Limit on pods", "title": "pod_limit", "type": "number" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": { "cpu_limit": 1, "ebs_limit": 1, "memory_limit": "65Mi", "pod_limit": 1 } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Kubernetes/K8S_Get_Candidate_Nodes_Given_Config.json ================================================ { "name": "k8s: Get candidate nodes for given configuration", "description": "This runbook get the matching nodes for a given configuration (storage, cpu, memory, pod_limit) from a k8s cluster", "uuid": "d85523e7d07d1413b8dde69caa4cd444057220b7a43c08ea0432b14cfdd01d36", "icon": "CONNECTOR_TYPE_K8S", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_K8S" ], "version": "1.0.0" } ================================================ FILE: Kubernetes/K8S_Log_Healthcheck.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "8c2def3e-168a-408c-b85d-49048cdd54cd", "metadata": { "jupyter": { "source_hidden": false 
}, "name": "K8s Log healthcheck", "orderProperties": [], "tags": [], "title": "K8s Log healthcheck" }, "source": [ "This runbook grabs all of your K8s pods, reads the logs from them, and then outputs any WARNING logs from the last hour.
\n", "\n", "
The input required is the namespace - from the input parameters.
\n", "\n", "
This will then query the namespace and return a list of pods in the Output variable 'podList.'
" ] }, { "cell_type": "code", "execution_count": 24, "id": "0cc3b3cf-638c-4b01-ae49-27cb6e30c79e", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "9e74360f92185496ce46b5110f5551edb1907d29ceed02dbb7b6a1a0b16e7e27", "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Kubectl list pods in given namespace", "execution_data": { "last_date_success_run_cell": "2023-01-13T20:02:23.900Z" }, "id": 47, "index": 47, "inputData": [ { "k8s_cli_string": { "constant": false, "value": "\"kubectl get pods -n {namespace}\"" }, "namespace": { "constant": false, "value": "namespace" } } ], "inputschema": [ { "properties": { "k8s_cli_string": { "default": "\"kubectl get pods -n {namespace}\"", "description": "kubectl List pods in given namespace", "title": "Kubectl Command", "type": "string" }, "namespace": { "description": "Namespace", "title": "Namespace", "type": "string" } }, "required": [ "namespace" ], "title": "k8s_kubectl_list_pods", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Kubectl list pods", "nouns": [], "orderProperties": [ "k8s_cli_string", "namespace" ], "output": { "type": "" }, "outputParams": { "output_name": "podList", "output_name_enabled": true }, "printOutput": true, "tags": [ "k8s_kubectl_list_pods" ], "verbs": [] }, "outputs": [], "source": [ "from pydantic import BaseModel, Field\n", "import pandas as pd\n", "import io\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_list_pods_printer(data: list):\n", " if data is None:\n", " return\n", "\n", " print(\"POD List:\")\n", "\n", " for pod in data:\n", " print(f\"\\t {pod}\")\n", "\n", "@beartype\n", "def k8s_kubectl_list_pods(handle, k8s_cli_string: str, 
namespace: str) -> list:\n", " \"\"\"k8s_kubectl_list_pods executes the given kubectl command\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type k8s_cli_string: str\n", " :param k8s_cli_string: kubectl get pods -n {namespace}.\n", "\n", " :type namespace: str\n", " :param namespace: Namespace.\n", "\n", " :rtype:\n", " \"\"\"\n", " k8s_cli_string = k8s_cli_string.format(namespace=namespace)\n", " result = handle.run_native_cmd(k8s_cli_string)\n", " df = pd.read_fwf(io.StringIO(result.stdout))\n", " all_pods = []\n", " for index, row in df.iterrows():\n", " all_pods.append(row['NAME'])\n", " return all_pods\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"k8s_cli_string\": \"\\\\\"kubectl get pods -n {namespace}\\\\\"\",\n", " \"namespace\": \"namespace\"\n", " }''')\n", "task.configure(outputName=\"podList\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_list_pods, lego_printer=k8s_kubectl_list_pods_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "d626b3d5-16fd-4878-a937-3e880a1442be", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2: get all of the logs", "orderProperties": [], "tags": [], "title": "Step 2: get all of the logs" }, "source": [ "Step 2 takes the list of pod 'pod\"list' from Step one, and the namespace input parameter, and obtains the logs for all of the Pods.
\n", "\n", "
We use the Iterator to iterate through the list. This can take a while if you have a lot of pods.
\n", "\n", "
The output is saved in a Dict called `allTheLogs`
" ] }, { "cell_type": "code", "execution_count": 25, "id": "5404a1ee-efd1-4bf6-91a8-e7d240e6ae43", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "7a54aaf7808d98bce5132bc5b5224a084d63ca31921dc362f5b91fbc581cd0da", "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Kubectl get logs for a given pod", "execution_data": { "last_date_success_run_cell": "2023-01-13T20:08:25.384Z" }, "id": 35, "index": 35, "inputData": [ { "k8s_cli_string": { "constant": false, "value": "\"kubectl logs {pod_name} -n {namespace}\"" }, "namespace": { "constant": false, "value": "namespace" }, "pod_name": { "constant": false, "value": "iter_item" } } ], "inputschema": [ { "properties": { "k8s_cli_string": { "default": "\"kubectl logs {pod_name} -n {namespace}\"", "description": "kubectl get logs for a given pod", "title": "Kubectl Command", "type": "string" }, "namespace": { "description": "Namespace", "title": "Namespace", "type": "string" }, "pod_name": { "description": "Pod Name", "title": "Pod Name", "type": "string" } }, "required": [ "pod_name", "namespace" ], "title": "k8s_kubectl_get_logs", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "pod_name", "iter_list": { "constant": false, "objectItems": false, "value": "podList" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Kubectl get logs", "nouns": [], "orderProperties": [ "k8s_cli_string", "pod_name", "namespace" ], "output": { "type": "" }, "outputParams": { "output_name": "allTheLogs", "output_name_enabled": true }, "printOutput": true, "tags": [ "k8s_kubectl_get_logs" ], "verbs": [] }, "outputs": [], "source": [ "from pydantic import BaseModel, Field\n", "from pprint import pprint\n", "\n", "from 
beartype import beartype\n", "@beartype\n", "def k8s_kubectl_get_logs_printer(data: str):\n", " if data is None:\n", " return\n", "\n", " print(\"Logs:\")\n", "\n", " pprint (data)\n", "\n", "@beartype\n", "def k8s_kubectl_get_logs(handle, k8s_cli_string: str, pod_name: str, namespace:str) -> str:\n", " \"\"\"k8s_kubectl_get_logs executes the given kubectl command\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type k8s_cli_string: str\n", " :param k8s_cli_string: kubectl logs {pod_name} -n {namespace}.\n", "\n", " :type pod_name: str\n", " :param pod_name: Pod Name.\n", "\n", " :type namespace: str\n", " :param namespace: Namespace.\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " k8s_cli_string = k8s_cli_string.format(pod_name=pod_name, namespace=namespace)\n", " result = handle.run_native_cmd(k8s_cli_string)\n", " data = result.stdout\n", " return data\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=False)\n", "task.configure(inputParamsJson='''{\n", " \"k8s_cli_string\": \"\\\\\"kubectl logs {pod_name} -n {namespace}\\\\\"\",\n", " \"namespace\": \"namespace\",\n", " \"pod_name\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"podList\",\n", " \"iter_parameter\": \"pod_name\"\n", " }''')\n", "task.configure(outputName=\"allTheLogs\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_get_logs, lego_printer=k8s_kubectl_get_logs_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "7d75d8a6-49e8-479a-a250-827685c7c376", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 3: parse the logs", "orderProperties": [], "tags": [], "title": "Step 3: parse the logs" }, 
"source": [ "'allTheLogs' is a large dictionary of log output. Loop through each pod's log, and extract any WARNING messages.
We use the input parameter hoursToExamine to filter for logs going back that many hours.
\n", "\n", "
\n", "
" ] }, { "cell_type": "code", "execution_count": 26, "id": "62686394-a57f-47ab-9b1d-1022869f25c1", "metadata": { "execution_data": { "last_date_success_run_cell": "2023-01-13T20:16:14.980Z" }, "jupyter": { "source_hidden": true }, "name": "parse dict of logs", "orderProperties": [], "tags": [], "title": "parse dict of logs", "credentialsJson": {} }, "outputs": [], "source": [ "import re\n", "from datetime import datetime, timedelta\n", "\n", "#get all warnings\n", "#only report warnings fournd in the x hours\n", "timeDiff = datetime.now()- timedelta(hours=hoursToExamine)\n", "#if there are warnings that are ok to supress, add them to this list\n", "stringsToIgnore = [\"arerqewreqwr\" ]\n", "#this will hold all the warnings\n", "warning_text_all = {}\n", "\n", "#Specific issues we can deal with\n", "primaryShardIsNotActive = False\n", "\n", "#we've collected a bunch of logs, lets loop through them for Warnings\n", "for instance in allTheLogs:\n", " #print(instance)\n", " log = allTheLogs[instance]\n", " #find the position of all instances of '[WARN' in the logs\n", " warning_start = [m.start() for m in re.finditer(re.escape('[WARN'), log)]\n", " \n", " for i in warning_start:\n", " warningtime = log[i-24:i-5]\n", " issue = log[i:i+400]\n", " warningtimeDT = datetime.strptime(warningtime, '%Y-%m-%dT%H:%M:%S')\n", " if warningtimeDT > timeDiff:\n", " if issue not in stringsToIgnore:\n", " warning_text_all[instance] = { warningtime:issue}\n", " #test for specific issues\n", " if issue.find(\"primary shard is not active Timeout\")>0:\n", " primaryShardIsNotActive = True\n", " \n", "print(warning_text_all, len(warning_text_all))" ] }, { "cell_type": "markdown", "id": "af26fd0a-7621-4016-8a0d-8a0492ce1b17", "metadata": { "jupyter": { "source_hidden": false }, "name": "Alerts!", "orderProperties": [], "tags": [], "title": "Alerts!" }, "source": [ "
Only send a slack message if there is a problem.
\n", "\n", "
To facilitate this, we use the Start Condition
\n", "```
\n", "len(warning_text_all) >0
\n", "```
\n", "If there are warnings, a Slack message is sent. If there are no warnings, there is no message.
" ] }, { "cell_type": "code", "execution_count": 27, "id": "ca14605f-1ca3-438b-951c-a3f680bcdb86", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "6a87f83ab0ecfeecb9c98d084e2b1066c26fa64be5b4928d5573a5d60299802d", "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Post Slack Message", "execution_data": { "last_date_success_run_cell": "2023-01-13T20:09:57.724Z" }, "id": 78, "index": 78, "inputData": [ { "channel": { "constant": false, "value": "\"unskript-healthcheck\"" }, "message": { "constant": false, "value": "warning_text_all" } } ], "inputschema": [ { "properties": { "channel": { "description": "Name of slack channel.", "title": "Channel", "type": "string" }, "message": { "description": "Message for slack channel.", "title": "Message", "type": "string" } }, "required": [ "channel", "message" ], "title": "slack_post_message", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SLACK", "name": "Post Slack Message", "nouns": [], "orderProperties": [ "channel", "message" ], "output": { "type": "" }, "printOutput": true, "startcondition": "len(warning_text_all) >0", "tags": [ "slack_post_message" ], "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "\n", "from pydantic import BaseModel, Field\n", "from slack_sdk import WebClient\n", "from slack_sdk.errors import SlackApiError\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "from beartype import beartype\n", "\n", "from beartype import beartype\n", "@beartype\n", "def slack_post_message_printer(output):\n", " if output is not None:\n", " pprint.pprint(output)\n", " else:\n", 
" return\n", "\n", "\n", "@beartype\n", "@beartype\n", "def slack_post_message(\n", " handle: WebClient,\n", " channel: str,\n", " message: str) -> str:\n", "\n", " try:\n", " response = handle.chat_postMessage(\n", " channel=channel,\n", " text=message)\n", " return f\"Successfuly Sent Message on Channel: #{channel}\"\n", " except SlackApiError as e:\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.response['error']}\")\n", " if e.response['error'] == 'channel_not_found':\n", " raise Exception('Channel Not Found')\n", " elif e.response['error'] == 'duplicate_channel_not_found':\n", " raise Exception('Channel associated with the message_id not valid')\n", " elif e.response['error'] == 'not_in_channel':\n", " raise Exception('Cannot post message to channel user is not in')\n", " elif e.response['error'] == 'is_archived':\n", " raise Exception('Channel has been archived')\n", " elif e.response['error'] == 'msg_too_long':\n", " raise Exception('Message text is too long')\n", " elif e.response['error'] == 'no_text':\n", " raise Exception('Message text was not provided')\n", " elif e.response['error'] == 'restricted_action':\n", " raise Exception('Workspace preference prevents user from posting')\n", " elif e.response['error'] == 'restricted_action_read_only_channel':\n", " raise Exception('Cannot Post message, read-only channel')\n", " elif e.response['error'] == 'team_access_not_granted':\n", " raise Exception('The token used is not granted access to the workspace')\n", " elif e.response['error'] == 'not_authed':\n", " raise Exception('No Authtnecition token provided')\n", " elif e.response['error'] == 'invalid_auth':\n", " raise Exception('Some aspect of Authentication cannot be validated. 
Request denied')\n", " elif e.response['error'] == 'access_denied':\n", " raise Exception('Access to a resource specified in the request denied')\n", " elif e.response['error'] == 'account_inactive':\n", " raise Exception('Authentication token is for a deleted user')\n", " elif e.response['error'] == 'token_revoked':\n", " raise Exception('Authentication token for a deleted user has been revoked')\n", " elif e.response['error'] == 'no_permission':\n", " raise Exception('The workspace toekn used does not have necessary permission to send message')\n", " elif e.response['error'] == 'ratelimited':\n", " raise Exception('The request has been ratelimited. Retry sending message later')\n", " elif e.response['error'] == 'service_unavailable':\n", " raise Exception('The service is temporarily unavailable')\n", " elif e.response['error'] == 'fatal_error':\n", " raise Exception('The server encountered catostrophic error while sending message')\n", " elif e.response['error'] == 'internal_error':\n", " raise Exception('The server could not complete operation, likely due to transietn issue')\n", " elif e.response['error'] == 'request_timeout':\n", " raise Exception('Sending message error via POST: either message was missing or truncated')\n", " else:\n", " raise Exception(f'Failed Sending Message to slack channel {channel} Error: {e.response[\"error\"]}')\n", "\n", " except Exception as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.__str__()}\")\n", " return f\"Unable to send message on {channel}\"\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \n", " \"message\": \"warning_text_all\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(warning_text_all) >0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is 
None:\n", " task.execute(slack_post_message, lego_printer=slack_post_message_printer, hdl=hdl, args=args)" ] } ], "metadata": { "execution_data": { "runbook_name": "Kubernetes Log Healthcheck", "parameters": [ "hoursToExamine", "namespace" ] }, "kernelspec": { "display_name": "unSkript (Build: 813)", "name": "python_kubernetes" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3" }, "parameterSchema": { "properties": { "hoursToExamine": { "default": 1, "description": "Hours to look back in the logs for WARNING messages. If you set hours =1, this runbook should be run hourly. If you choose 24 hours, then run it daily.", "title": "hoursToExamine", "type": "number" }, "namespace": { "default": "logging", "description": "The namespace for your K8s instances", "title": "namespace", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": { "hoursToExamine": "\"float(1)\"", "namespace": "logging" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Kubernetes/K8S_Log_Healthcheck.json ================================================ { "name": "Kubernetes Log Healthcheck", "description": "This RunBook checks the logs of every pod in a namespace for warning messages.", "uuid": "ee1aa2cb2a0854604bcc516389cf542af17c8de07e5da70524286a112c4eef6f", "icon": "CONNECTOR_TYPE_K8S", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_TROUBLESHOOTING" ], "connector_types": [ "CONNECTOR_TYPE_K8S" ], "version": "1.0.0" } ================================================ FILE: Kubernetes/K8S_Pod_Stuck_In_CrashLoopBack_State.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "2a1bc075-e2c8-466a-9aa6-07e84c21c162", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps 
Overview" }, "source": [ "\n", "
1) Get list of pods in CrashLoopBackOff State
2) Gather information of the pod
3) Collect pod exit code
A CrashLoopBackOff error occurs when a pod startup fails repeatedly in Kubernetes.
When running a kubectl get pods command, you would see something like this\n",
"\n",
"NAME READY STATUS RESTARTS AGE\n",
"nginx-7ef9efa7cd-qasd2 0/1 CrashLoopBackOff 2 1m\n",
"\n",
"Or\n",
"\n",
"NAME READY STATUS RESTARTS AGE\n",
"pod1-7ef9efa7cd-qasd2 0/2 Init:CrashLoopBackOff 2 1m\n",
"\n",
"This custom action changes the type of namespace from None to String only if no namespace is given
" ] }, { "cell_type": "code", "execution_count": null, "id": "16a31ef5-a834-4878-afa5-79f64dfa0c3d", "metadata": { "collapsed": true, "customAction": true, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Convert namespace to String if empty", "orderProperties": [], "tags": [], "title": "Convert namespace to String if empty", "credentialsJson": {} }, "outputs": [], "source": [ "if namespace==None:\n", " namespace=''" ] }, { "cell_type": "markdown", "id": "60e73ca7-e3a8-42d3-a3bb-87ad2baa1f91", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 1", "orderProperties": [], "tags": [], "title": "Step 1" }, "source": [ "This action fetches a list of the pods in CrashLoopBack State. This action will consider namespace as all if no namespace is given.
\n", "\n", "This action takes the following parameters (Optional):
\n", "namespace
\n", "" ] }, { "cell_type": "code", "execution_count": 4, "id": "060496ab-6cef-4a23-8a93-194cb8774ea3", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "d8047bf803242cfbfd1a19e28d64ae8d95168f8edb753ae4e1e7a7af1ffccf07", "collapsed": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Get all K8s pods in CrashLoopBackOff State", "execution_data": { "last_date_success_run_cell": "2023-02-10T12:54:10.973Z" }, "id": 26, "index": 26, "inputData": [ { "namespace": { "constant": false, "value": "str(namespace)" } } ], "inputschema": [ { "properties": { "namespace": { "default": "", "description": "k8s Namespace", "title": "Namespace", "type": "string" } }, "title": "k8s_get_pods_in_crashloopbackoff_state", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Get all K8s Pods in CrashLoopBackOff State", "nouns": [], "orderProperties": [ "namespace" ], "output": { "type": "" }, "outputParams": { "output_name": "crashloopbackoff_pods", "output_name_enabled": true }, "printOutput": true, "probeEnabled": false, "tags": [ "k8s_get_pods_in_crashloopbackoff_state" ], "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Tuple\n", "from unskript.legos.utils import CheckOutput, CheckOutputStatus\n", "from collections import defaultdict\n", "import json\n", "import pprint\n", "import re\n", "\n", "from beartype import beartype\n", "@beartype\n", "def 
k8s_get_pods_in_crashloopbackoff_state_printer(output):\n", " if output is None:\n", " return\n", " if isinstance(output, CheckOutput):\n", " print(output.json())\n", " else:\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def k8s_get_pods_in_crashloopbackoff_state(handle, namespace: str=None) -> Tuple:\n", " \"\"\"k8s_get_pods_in_crashloopbackoff_state executes the given kubectl command to find pods in CrashLoopBackOff State\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type namespace: Optional[str]\n", " :param namespace: Namespace to get the pods from. Eg:\"logging\", if not given all namespaces are considered\n", "\n", " :rtype: Status, List of pods in CrashLoopBackOff State\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", " kubectl_command =\"kubectl get pods --all-namespaces | grep CrashLoopBackOff | tr -s ' ' | cut -d ' ' -f 1,2\"\n", " if namespace:\n", " kubectl_command = \"kubectl get pods -n \" + namespace + \" | grep CrashLoopBackOff | cut -d' ' -f 1 | tr -d ' '\"\n", " response = handle.run_native_cmd(kubectl_command)\n", " if response is None or hasattr(response, \"stderr\") is False or response.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {response.stderr}\")\n", " return str()\n", " temp = response.stdout\n", " result = []\n", " res = []\n", " unhealthy_pods =[]\n", " unhealthy_pods_tuple = ()\n", " if not namespace:\n", " all_namespaces = re.findall(r\"(\\S+).*\",temp)\n", " all_unhealthy_pods = re.findall(r\"\\S+\\s+(.*)\",temp)\n", " unhealthy_pods = [(i, j) for i, j in zip(all_namespaces, all_unhealthy_pods)]\n", " res = defaultdict(list)\n", " for key, val in unhealthy_pods:\n", " res[key].append(val)\n", " elif namespace:\n", " all_pods = []\n", " all_unhealthy_pods =[]\n", " all_pods = re.findall(r\"(\\S+).*\",temp)\n", " for p in 
all_pods:\n", " unhealthy_pods_tuple = (namespace,p)\n", " unhealthy_pods.append(unhealthy_pods_tuple)\n", " res = defaultdict(list)\n", " for key, val in unhealthy_pods:\n", " res[key].append(val)\n", " if len(res)!=0:\n", " result.append(dict(res))\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"namespace\": \"namespace\"\n", " }''')\n", "task.configure(outputName=\"crashloopbackoff_pods\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_get_pods_in_crashloopbackoff_state, lego_printer=k8s_get_pods_in_crashloopbackoff_state_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "ba58fe60-9922-4c86-b0d6-d76d4db71249", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 1A", "orderProperties": [], "tags": [], "title": "Step 1A" }, "source": [ "This action captures the following ouput:
\n", "crashloopbackoff_pods
Examine the output from Step 1\ud83d\udc46, and create a list of commands for each pod in a namespace that is found to be in the CrashLoopBackOff State
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 5, "id": "042b8352-5769-403c-9c22-432fa48de97d", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-09T11:18:22.306Z" }, "jupyter": { "source_hidden": true }, "name": "Create List of commands to get Events", "orderProperties": [], "tags": [], "title": "Create List of commands to get Events", "credentialsJson": {} }, "outputs": [], "source": [ "all_unhealthy_pods = []\n", "for each_pod_dict in crashloopbackoff_pods:\n", " if type(each_pod_dict)==list:\n", " for pod in each_pod_dict:\n", " for k,v in pod.items():\n", " if len(v)!=0:\n", " nspace = k\n", " u_pod = ' '.join([str(each_pod) for each_pod in v])\n", " cmd = \"kubectl describe pod \"+u_pod+\" -n \"+nspace+\" | grep -A 10 Events\"\n", " all_unhealthy_pods.append(cmd)\n", "print(all_unhealthy_pods)" ] }, { "cell_type": "markdown", "id": "528330e1-c862-42bc-9056-05608a78d437", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following ouput:
\n", "all_unhealthy_pods
This action describes events for a list of unhealthy pods obtained in Step 1.
\n", "\n", "\n", "This action takes the following parameters (Optional):
\n", "namespace
\n", "" ] }, { "cell_type": "code", "execution_count": 6, "id": "5d45773d-cf52-4dcb-8a35-01219781cf8f", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "ae0b25757f0c6c0ca4b3aaf6feea636e3f193dc354f74823a7becd7d675becdc", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Kubectl command in python syntax.", "execution_data": { "last_date_success_run_cell": "2023-02-09T11:19:18.986Z" }, "id": 21, "index": 21, "inputData": [ { "kubectl_command": { "constant": false, "value": "iter_item" } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "kubectl_command", "iter_list": { "constant": false, "objectItems": false, "value": "all_unhealthy_pods" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Gather Information of the pod", "nouns": [ "command" ], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "describe_output", "output_name_enabled": true }, "printOutput": true, "startcondition": "len(all_unhealthy_pods)!=0", "tags": [ "k8s_kubectl_command" ], "title": "Gather Information of the pod", "verbs": [ "execute" ], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2021 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command(handle, kubectl_command: str) -> str:\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or 
hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return None\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_unhealthy_pods\",\n", " \"iter_parameter\": \"kubectl_command\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_unhealthy_pods)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"describe_output\")\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, hdl=hdl, args=args)\n", "\n", "if hasattr(task, 'output'):\n", " if isinstance(task.output, (list, tuple)):\n", " for item in task.output:\n", " print(f'item: {item}')\n", " elif isinstance(task.output, dict):\n", " for item in task.output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(f'Output for {task.name}')\n", " print(task.output)\n", " w.tasks[task.name]= task.output" ] }, { "cell_type": "markdown", "id": "26886eb4-ca1f-40f0-a2da-c34af115ae69", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following ouput:
\n", "describe_output
From the output from Step 2\ud83d\udc46, we convert the dict output to a string format.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 26, "id": "50d94b8f-7c44-413e-b653-72c59ab1ee15", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-08T12:26:44.491Z" }, "jupyter": { "source_hidden": true }, "name": "Convert to String ", "orderProperties": [], "tags": [], "title": "Convert to String ", "credentialsJson": {} }, "outputs": [], "source": [ "import json\n", "\n", "all_describe_info = json.dumps(describe_output)\n", "print(all_describe_info)" ] }, { "cell_type": "markdown", "id": "ba918b53-4a49-494d-956f-073849b6cd9e", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2B", "orderProperties": [], "tags": [], "title": "Step 2B" }, "source": [ "This action captures the following ouput:
\n", "all_describe_info
Examine the output from Step 2A\ud83d\udc46, and make a note of any containers that have a Back-off restarting failed container in the description.
From the output from Step 1\ud83d\udc46, create a list of commands for each pod in a namespace to get the exit code for each pod to examine the reason for failure.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 7, "id": "e19a6db0-d941-4e62-8a3b-05105389ebfe", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-09T11:20:07.998Z" }, "jupyter": { "source_hidden": true }, "name": "Create List of commands to get Exit Code", "orderProperties": [], "tags": [], "title": "Create List of commands to get Exit Code", "credentialsJson": {} }, "outputs": [], "source": [ "all_pods_exit_code = []\n", "for x in crashloopbackoff_pods:\n", " if type(x[1])==list:\n", " if len(x[1])!=0:\n", " for pod in x[1]:\n", " for k,v in pod.items():\n", " nspace = k\n", " u_pod = ' '.join([str(each_pod) for each_pod in v])\n", " cmd = \"kubectl describe pod \"+u_pod+\" -n \"+nspace+\" | grep \\\\\"+\"Exit Code\"+\"\\\\\"+\" | cut -d':' -f 2 | tr -d ' '\"\n", " all_pods_exit_code.append(cmd)\n", "print(all_pods_exit_code)" ] }, { "cell_type": "markdown", "id": "e9ef9f2a-2cd8-4bb9-9efc-746e2ec958d2", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 3", "orderProperties": [], "tags": [], "title": "Step 3" }, "source": [ "This action captures the following ouput:
\n", "all_pods_exit_code
Examine the output from Step 1\ud83d\udc46, and look for the Exit Code.
\n", "This action captures the following ouput: exit_code" ] }, { "cell_type": "code", "execution_count": 31, "id": "e8db2cae-8894-47a0-8b88-d2275314acd7", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "execution_data": { "last_date_success_run_cell": "2023-02-08T12:34:23.155Z" }, "id": 51, "index": 51, "inputData": [ { "kubectl_command": { "constant": false, "value": "iter_item" } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "kubectl_command", "iter_list": { "constant": false, "objectItems": false, "value": "all_pods_exit_code" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Collect pod exit code", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "exit_code", "output_name_enabled": true }, "printOutput": true, "probeEnabled": false, "startcondition": "len(all_pods_exit_code)!=0", "tags": [ "k8s_kubectl_command" ], "title": "Collect pod exit code", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, 
Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def k8s_kubectl_command(handle, kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_pods_exit_code\",\n", " \"iter_parameter\": \"kubectl_command\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_pods_exit_code)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"exit_code\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": 
"21e2e967-6514-4b87-b43b-b1f0e95b4ac2", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 3B", "orderProperties": [], "tags": [], "title": "Step 3B" }, "source": [ "
From the output from Step 3\ud83d\udc46, create a list of exit codes to analyze in Step 3C.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 45, "id": "5351b111-f025-4952-a3dc-917047966aab", "metadata": { "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-08T12:50:44.137Z" }, "jupyter": { "source_hidden": true }, "name": "Create List of Exit Codes", "orderProperties": [], "tags": [], "title": "Create List of Exit Codes", "credentialsJson": {} }, "outputs": [], "source": [ "import json\n", "all_exit_code_info = []\n", "for k,v in exit_code.items():\n", " all_exit_code_info.append(v)\n", "print(all_exit_code_info)" ] }, { "cell_type": "markdown", "id": "86de5ae7-00f3-424a-9740-c02cd0cab643", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 3C", "orderProperties": [], "tags": [], "title": "Step 3C" }, "source": [ "This action captures the following ouput:
\n", "all_exit_code_info
Using the exit_codes list from Step 3B\ud83d\udc46, examine each code.
" ] }, { "cell_type": "code", "execution_count": 52, "id": "6c8adc48-7c21-40cc-8dbc-77a9d46843fc", "metadata": { "execution_data": { "last_date_success_run_cell": "2023-02-08T12:54:26.923Z" }, "jupyter": { "source_hidden": true }, "name": "Examine Exit Code", "orderProperties": [], "tags": [], "title": "Examine Exit Code", "credentialsJson": {} }, "outputs": [], "source": [ "from IPython.display import Markdown as md\n", "\n", "# if repoLocation is not None:\n", "# display(md(f\"**Please verify {repoLocation} is accessible from the K8S POD**\"))\n", "\n", "if 'all_exit_code_info' not in globals():\n", " pass\n", "else:\n", " for ec in all_exit_code_info:\n", " if ec is None or len(ec)==0:\n", " exitCode = 323400\n", " if ec is not None or len(ec)!=0:\n", " exitCode = int(ec)\n", " if exitCode == 0:\n", " display(md(\"Exit code 0 implies that the specified container command completed\"))\n", " display(md(\"Successfully, but too often for Kubernetes to accept as working.\"))\n", " display(md(\"\"))\n", " display(md(\"Did you fail to specify a command in the POD Spec, and the container ran\"))\n", " display(md(\"a default shell command that failed? If so, you will need to fix the command\"))\n", " elif exitCode == 1:\n", " display(md(\"The container failed to run its command successfully, and returned\"))\n", " display(md(\"an exit code 1. This is an application failure within the process\"))\n", " display(md(\"that was started, but return with a failing exit code some time after.\"))\n", " display(md(\"\"))\n", " display(md(\"If this is happening only with all pods running on your cluster, then\"))\n", " display(md(\"there may be a problem with your nodes. Check Nodes are OK on your cluster\"))\n", " display(md(\"with kubectl get nodes -o wide command\"))\n", " elif exitCode == 2:\n", " display(md(\"An exit code of 2 indicates either that the application chose to return\"))\n", " display(md(\"that error code, or there was a misuse of a shell builtin. 
Check your\"))\n", " display(md(\"pod's command specification to ensure that the command is correct.\"))\n", " display(md(\"If you think it is correct, try running the image locally with a shell\"))\n", " display(md(\"and run the command directly.\"))\n", " elif exitCode == 128:\n", " display(md(\"An exit code of 128 indicates that the container could not run. Check this\"))\n", " display(md(\"by kubectl describe pod command, check to see if LastState Reason is\"))\n", " display(md(\"ContainerCannotRun.\"))\n", " elif exitCode == 137:\n", " display(md(\"This indicates that the container was killed with Signal 9\"))\n", " display(md(\"This can be due to One of these reasons:\"))\n", " display(md(\" 1. Container ran out of Memory\"))\n", " display(md(\" 2. The OOMKiller killed the container\"))\n", " display(md(\" 3. The liveness probe failed. Check liveness and readiness probes\"))\n", " else:\n", " display(md(\"Some common application problem to consider are\"))\n", " display(md(\" 1. Priveleged access to function. By setting allowPrivelegeEscalation\"))\n", " display(md(\" 2. SELinux or AppArmor controls may be preventing your application to run\"))\n", " \n", "\n", " display(md(\">You can use kubectl get pods command to verify after you fix the issue\"))" ] }, { "cell_type": "markdown", "id": "e25b3628-8ff0-401e-b909-e4955e45f397", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "In this Runbook, we were able to identify pods stuck in CrashLoopBackOff State and examined the possible event that caused it's failure using unSkript's K8s actions. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "k8s: Pod Stuck in CrashLoopBackoff State", "parameters": [ "namespace" ] }, "kernelspec": { "display_name": "unSkript (Build: 839)", "name": "python_kubernetes" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3" }, "parameterSchema": { "properties": { "namespace": { "description": "K8S Namespace", "title": "namespace", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": null }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Kubernetes/K8S_Pod_Stuck_In_CrashLoopBack_State.json ================================================ { "name": "k8s: Pod Stuck in CrashLoopBackoff State", "description": "This runbook checks if any Pod(s) in CrashLoopBackoff state in a given k8s namespace. If it finds, it tries to find out the reason why the Pod(s) is in that state.", "uuid": "1d3a64b3c396be6d27b260606aa5570f61e79f3b7adcda457e026da657edc079", "icon": "CONNECTOR_TYPE_K8S", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_K8S" ], "version": "1.0.0" } ================================================ FILE: Kubernetes/K8S_Pod_Stuck_In_ImagePullBackOff_State.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": 2, "id": "56630bd7-a4d2-492d-bb06-5a3027a321f1", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-06-07T16:32:28.600Z" }, "name": "Click \"Run Action\" For a video tutorial -->", "orderProperties": [], "tags": [], "title": "Click \"Run Action\" For a video tutorial -->", "trusted": true }, "outputs": [], "source": [ "%%html\n", "\n" ] }, { "cell_type": "markdown", "id": "f518e5b7-08a7-425c-9d86-cfc629d5b355", "metadata": { "jupyter": { "source_hidden": false }, 
"name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Get list of pods in ImagePullBackOff State
2) Extract Events of the pods
3) Check registry accessibility
An ImagePullBackOff error occurs when a Pod startup fails to pull the specified image. The reasons could be Non-Existent of the repository or Permission to Access the repository issues. This runbook helps to walk through the steps involved in debugging such a Pod.
\n", "
We'll then create the steps required to resolve the issue - learning how to use unSkript at the same time.
\n", "If you haven't already - click \"Run Action\" above to see a YouTube video that will begin walking you through the process.
\n", "This custom action changes the type of namespace from None to String only if no namespace is given
" ] }, { "cell_type": "code", "execution_count": 3, "id": "a49bd0a5-1b34-4beb-940d-9f28239837e0", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-06-07T16:32:36.279Z" }, "jupyter": { "source_hidden": true }, "name": "Convert namespace to String if empty", "orderProperties": [], "tags": [], "title": "Convert namespace to String if empty", "trusted": true }, "outputs": [], "source": [ "if namespace==None:\n", " namespace=''\n", " " ] }, { "cell_type": "markdown", "id": "6cdb0116-152b-493c-8eb9-71237b691806", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 1", "orderProperties": [], "tags": [], "title": "Step 1" }, "source": [ "This action fetches a list of the pods in ImagePullBackOff State. This action will consider namespace as all if no namespace is given.
\n", "\n", "This action takes the following parameters (Optional):
\n", "namespace
\n", "" ] }, { "cell_type": "code", "execution_count": 4, "id": "fbfd4282-2516-4506-b617-c6816736dbea", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "683b7f1a1482a5bed32698689e2b47e13dcdb5e00d719316cc46ada5ead26758", "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Get all K8s pods in ImagePullBackOff State", "execution_data": {}, "id": 45, "index": 45, "inputData": [ { "namespace": { "constant": false, "value": "namespace" } } ], "inputschema": [ { "properties": { "namespace": { "default": "", "description": "k8s Namespace", "title": "Namespace", "type": "string" } }, "title": "k8s_get_pods_in_imagepullbackoff_state", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Get all K8s Pods in ImagePullBackOff State", "nouns": [], "orderProperties": [ "namespace" ], "output": { "type": "" }, "outputParams": { "output_name": "imagepullbackoff_pods", "output_name_enabled": true }, "printOutput": true, "probeEnabled": false, "tags": [ "k8s_get_pods_in_imagepullbackoff_state" ], "title": "Get all K8s Pods in ImagePullBackOff State", "trusted": true, "verbs": [], "execution_count": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, Tuple\n", "from unskript.legos.utils import CheckOutput, CheckOutputStatus\n", "from collections import defaultdict\n", "import json\n", "import pprint\n", "import re\n", "\n", "from beartype import beartype\n", "@beartype\n", "def 
k8s_get_pods_in_imagepullbackoff_state_printer(output):\n", " if output is None:\n", " return\n", " if isinstance(output, CheckOutput):\n", " print(output.json())\n", " else:\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def k8s_get_pods_in_imagepullbackoff_state(handle, namespace: str=None) -> Tuple:\n", " \"\"\"k8s_get_list_of_pods_with_imagepullbackoff_state executes the given kubectl command to find pods in ImagePullBackOff State\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type namespace: Optional[str]\n", " :param namespace: Namespace to get the pods from. Eg:\"logging\", if not given all namespaces are considered\n", "\n", " :rtype: Status, List of pods in CrashLoopBackOff State\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", " kubectl_command =\"kubectl get pods --all-namespaces | grep ImagePullBackOff | tr -s ' ' | cut -d ' ' -f 1,2\"\n", " if namespace:\n", " kubectl_command = \"kubectl get pods -n \" + namespace + \" | grep ImagePullBackOff | cut -d' ' -f 1 | tr -d ' '\"\n", " response = handle.run_native_cmd(kubectl_command)\n", " if response is None or hasattr(response, \"stderr\") is False or response.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {response.stderr}\")\n", " return str()\n", " temp = response.stdout\n", " result = []\n", " res = []\n", " unhealthy_pods =[]\n", " unhealthy_pods_tuple = ()\n", " if not namespace:\n", " all_namespaces = re.findall(r\"(\\S+).*\",temp)\n", " all_unhealthy_pods = re.findall(r\"\\S+\\s+(.*)\",temp)\n", " unhealthy_pods = [(i, j) for i, j in zip(all_namespaces, all_unhealthy_pods)]\n", " res = defaultdict(list)\n", " for key, val in unhealthy_pods:\n", " res[key].append(val)\n", " elif namespace:\n", " all_pods = []\n", " all_unhealthy_pods =[]\n", " all_pods = re.findall(r\"(\\S+).*\",temp)\n", " for 
p in all_pods:\n", " unhealthy_pods_tuple = (namespace,p)\n", " unhealthy_pods.append(unhealthy_pods_tuple)\n", " res = defaultdict(list)\n", " for key, val in unhealthy_pods:\n", " res[key].append(val)\n", " if len(res)!=0:\n", " result.append(dict(res))\n", " if len(result) != 0:\n", " return (False, result)\n", " else:\n", " return (True, None)\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"namespace\": \"namespace\"\n", " }''')\n", "\n", "task.configure(outputName=\"imagepullbackoff_pods\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_get_pods_in_imagepullbackoff_state, lego_printer=k8s_get_pods_in_imagepullbackoff_state_printer, hdl=hdl, args=args)" ] }, { "cell_type": "code", "execution_count": 6, "id": "b273811b-9921-4786-9808-230187591944", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-06-07T16:34:59.845Z" }, "name": "Video 2: Click Run Action -->", "orderProperties": [], "tags": [], "title": "Video 2: Click Run Action -->", "trusted": true }, "outputs": [], "source": [ "%%html\n", "" ] }, { "cell_type": "markdown", "id": "7b195002-2041-48dc-a7de-3ca871925e58", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 1A", "orderProperties": [], "tags": [], "title": "Step 1A" }, "source": [ "This action captures the following ouput:
\n", "imagepullbackoff_pods
Examine the output from Step 1\ud83d\udc46, and create a list of commands for each pod in a namespace that is found to be in the ImagePullBackOff State
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 7, "id": "52ca8812-faef-4953-a4e6-94ba17bb5c17", "metadata": { "collapsed": true, "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-06-07T16:35:03.045Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Create List of commands to get Events", "orderProperties": [], "tags": [], "title": "Create List of commands to get Events", "trusted": true }, "outputs": [], "source": [ "all_unhealthy_pods = []\n", "for each_pod_dict in imagepullbackoff_pods:\n", " if type(each_pod_dict)==list:\n", " for pod in each_pod_dict:\n", " for k,v in pod.items():\n", " if len(v)!=0:\n", " nspace = k\n", " u_pod = ' '.join([str(each_pod) for each_pod in v])\n", " cmd = \"kubectl describe pod \"+u_pod+\" -n \"+nspace+\" | grep -A 10 Events\"\n", " all_unhealthy_pods.append(cmd)\n", "print(all_unhealthy_pods)" ] }, { "cell_type": "markdown", "id": "380b03f3-b09c-4836-8d50-15ee8021d0e4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following ouput:
\n", "all_unhealthy_pods
This action describes events for a list of unhealthy pods obtained in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 6, "id": "cae3c677-fe96-4d0e-9d64-1b11abd00883", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "execution_data": {}, "id": 51, "index": 51, "inputData": [ { "kubectl_command": { "constant": false, "value": "iter_item" } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "kubectl_command", "iter_list": { "constant": false, "objectItems": false, "value": "all_unhealthy_pods" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Extract Events for the Pods", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "describe_output", "output_name_enabled": true }, "printOutput": true, "startcondition": "len(all_unhealthy_pods)!=0", "tags": [ "k8s_kubectl_command" ], "title": "Extract Events for the Pods", "verbs": [], "execution_count": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def 
k8s_kubectl_command(handle, kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"all_unhealthy_pods\",\n", " \"iter_parameter\": \"kubectl_command\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(all_unhealthy_pods)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"describe_output\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "67287ce3-806d-458b-9fe5-ed0e6b146252", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2B", "orderProperties": [], "tags": [], "title": "Step 2B" }, "source": [ "This action captures the following ouput:
\n", "describe_output
This Custom Action searches Known errors . The well known errors are listed in the error_msgs variable. If there is a new error message that was found, it can be added to the list.
" ] }, { "cell_type": "code", "execution_count": 8, "id": "6df7c408-377b-4ea8-a33c-ff3c5329fbaa", "metadata": { "credentialsJson": {}, "execution_data": { "last_date_success_run_cell": "2023-05-25T16:26:31.562Z" }, "jupyter": { "source_hidden": true }, "name": "Examine Events", "orderProperties": [], "tags": [], "title": "Examine Events" }, "outputs": [], "source": [ "import re\n", "\n", "\"\"\"\n", "This Custom Action searches Known errors in the podEvents variable.\n", "The well known errors are listed in the error_msgs variable. If\n", "there is a new error message that was found, you can add it to this\n", "list and the next run, the runbook will catch that error.\n", "\"\"\"\n", "\n", "def check_msg(msg, err):\n", " return re.search(err, msg)\n", "\n", "error_msgs = [\"repository (.*) does not exist or no pull access\",\n", " \"manifest for (.*) not found\",\n", " \"pull access denied, repository does not exist or may require authorization\",\n", " \"Back-off pulling image (.*)\"]\n", "cause_found = False\n", "result = ''\n", "for key, msg in describe_output.items():\n", " for err in error_msgs:\n", " result = check_msg(msg, err)\n", " if result is not None:\n", " print(\"PROBABLE CAUSE: \", f\"{result.string}\")\n", " cause_found = True\n", "\n", "repoLocation = ''\n", "if cause_found is False:\n", " print(\"ERROR MESSAGE : \\n\", all_describe_info)\n", "else:\n", " try:\n", " repoLocation = result.groups()[0]\n", " except:\n", " pass\n", " else:\n", " print(\"Image Repo Location : \", repoLocation)" ] }, { "cell_type": "markdown", "id": "83081ee6-55c1-4f82-923b-ed6c4e054d35", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 3", "orderProperties": [], "tags": [], "title": "Step 3" }, "source": [ "From the output from Step 2B\ud83d\udc46check if the repoLocation is accessible.
" ] }, { "cell_type": "code", "execution_count": 9, "id": "d3fbe0a1-6669-490f-8ffc-3e4e11a32156", "metadata": { "credentialsJson": {}, "execution_data": { "last_date_success_run_cell": "2023-05-25T16:26:41.642Z" }, "jupyter": { "source_hidden": true }, "name": "Check Registry Accessibility", "orderProperties": [], "tags": [], "title": "Check Registry Accessibility" }, "outputs": [], "source": [ "from IPython.display import Markdown as md\n", "\n", "if repoLocation is not None:\n", " display(md(f\"**Please verify Repo {repoLocation} is accessible from the K8S POD**\"))" ] }, { "cell_type": "code", "execution_count": 9, "id": "e68888fe-d002-49af-9196-cebc01594dbc", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-06-07T16:39:45.975Z" }, "jupyter": { "source_hidden": true }, "name": "Video 3: Click Run Action -->", "orderProperties": [], "tags": [], "title": "Video 3: Click Run Action -->", "trusted": true }, "outputs": [], "source": [ "%%html\n", "" ] }, { "cell_type": "markdown", "id": "094afbe4-6ea9-4c02-883b-55c4754422c8", "metadata": { "name": "Here is the Code for Step 3a, 3b, 3c", "orderProperties": [], "tags": [], "title": "Here is the Code for Step 3a, 3b, 3c" }, "source": [ "Step 3a: ADdthis to an ACtion (add -> Action)
\n", "patchCommand= \"kubectl patch pod image-pullback -n \" + namespace + ' -p \\'{\"spec\":{\"containers\":[{\"name\":\"image-pullback-container\", \"image\":\"debian\"}]}}\\''
\n", "\n", "
Step 3b: Search actions on the Right menu for \"Kubectl Command.\" Drag this action in, add your K8s credentials.
\n", "\n", "
Add this to the Kubectl Command
\n", "patchCommand
\n", "\n", "
Step 3c:
\n", "Drag in a second \"Kubectl Command\" action, add your K8s credentials.
\n", "Add this to the Kubectl Command:
\n", "f'kubectl get pods -n {namespace}'
\n", "" ] } ], "metadata": { "execution_data": { "runbook_name": "k8s: Pod Stuck in ImagePullBackOff State", "parameters": [ "namespace" ] }, "kernelspec": { "display_name": "unSkript (Build: 1172)", "name": "python3" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3" }, "outputParameterSchema": null, "parameterSchema": { "definitions": null, "properties": { "namespace": { "description": "K8S Namespace", "title": "namespace", "type": "string" } }, "required": [ "namespace" ], "title": "Schema", "type": "object" }, "parameterValues": {} }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Kubernetes/K8S_Pod_Stuck_In_ImagePullBackOff_State.json ================================================ { "name": "k8s: Pod Stuck in ImagePullBackOff State", "description": "This runbook checks if any Pod(s) in ImagePullBackOff state in a given k8s namespace. If it finds, it tries to find out the reason why the Pod(s) is in that state.", "uuid": "a53b5860500e142aa387ce55d5e85f139596c521dfb5c920cc2bc47c38fc0b11", "icon": "CONNECTOR_TYPE_K8S", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_K8S" ], "version": "1.0.0" } ================================================ FILE: Kubernetes/K8S_Pod_Stuck_In_ImagePullBackOff_State_with_genai.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "f518e5b7-08a7-425c-9d86-cfc629d5b355", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "
1) Get list of pods in ImagePullBackOff State
2) Extract Events of the pods
3) Check registry accessibility
An ImagePullBackOff error occurs when a Pod startup fails to pull the specified image. The reasons could be a non-existent repository or permission issues accessing the repository. This runbook helps to walk through the steps involved in debugging such a Pod. We'll then create the steps required to resolve the issue - learning how to use unSkript at the same time.
This action fetches a list of the pods in ImagePullBackOff State. If no namespace is given, all namespaces are considered.
\n", "\n", "This action takes the following parameters:
\n", "namespace
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "1de4e931-bb27-47d0-bb67-3ff9696d41e2", "metadata": { "actionIsGenAI": true, "show_tool_tip_gen_ai_chat": "openChat", "tool_tip_gen_ai_chat_first_message":"write a function to get list of pods in ImagePullBackOff State with namespace as a required parameter. Use container status to evaluate this condition. It should only return the pod name.", "customAction": true, "orderProperties": [], "tags": [], "credentialsJson": {} }, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 4, "id": "b273811b-9921-4786-9808-230187591944", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-08-01T23:17:30.711Z" }, "jupyter": { "source_hidden": true }, "name": "Video 2: Click Run Action -->", "orderProperties": [], "tags": [], "title": "Video 2: Click Run Action -->" }, "outputs": [], "source": [ "%%html\n", "" ] }, { "cell_type": "markdown", "id": "7b195002-2041-48dc-a7de-3ca871925e58", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 1A", "orderProperties": [], "tags": [], "title": "Step 1A" }, "source": [ "This action captures the following ouput:
\n", "imagepullbackoff_pods
Examine the output from Step 1\ud83d\udc46, and create a list of commands for each pod in a namespace that is found to be in the ImagePullBackOff State
\n", "\n", "" ] }, { "cell_type": "markdown", "id": "380b03f3-b09c-4836-8d50-15ee8021d0e4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following ouput:
\n", "imagepullbackoff_pods
This action describes events for a list of unhealthy pods obtained in Step 1.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 5, "id": "cae3c677-fe96-4d0e-9d64-1b11abd00883", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "condition_enabled": true, "continueOnError": true, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "execution_data": { "last_date_success_run_cell": "2023-08-01T23:45:19.929Z" }, "id": 51, "index": 51, "inputData": [ { "kubectl_command": { "constant": false, "value": "iter_item" } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "iterData": [ { "iter_enabled": true, "iter_item": "kubectl_command", "iter_list": { "constant": false, "objectItems": false, "value": "[ f\"kubectl describe pod {x} -n {namespace} | grep -A 10 Events\" for x in imagepullbackoff_pods ]" } } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Extract Events for the Pods", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "describe_output", "output_name_enabled": true }, "printOutput": true, "service_id_enabled": false, "startcondition": "len(imagepullbackoff_pods)!=0", "tags": [ "k8s_kubectl_command" ], "title": "Extract Events for the Pods", "verbs": [] }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "from beartype import beartype\n", "@beartype\n", 
"def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def k8s_kubectl_command(handle, kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(continueOnError=True)\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"iter_item\"\n", " }''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"[ f\\\\\"kubectl describe pod {x} -n {namespace} | grep -A 10 Events\\\\\" for x in imagepullbackoff_pods ]\",\n", " \"iter_parameter\": \"kubectl_command\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"len(imagepullbackoff_pods)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"describe_output\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": 
"67287ce3-806d-458b-9fe5-ed0e6b146252", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2B", "orderProperties": [], "tags": [], "title": "Step 2B" }, "source": [ "This action captures the following output:
\n", "describe_output
This Custom Action searches for known errors. The well-known errors are listed in the error_msgs variable. If there is a new error message that was found, it can be added to the list.
" ] }, { "cell_type": "code", "execution_count": 6, "id": "6df7c408-377b-4ea8-a33c-ff3c5329fbaa", "metadata": { "credentialsJson": {}, "execution_data": { "last_date_success_run_cell": "2023-08-01T23:45:28.944Z" }, "jupyter": { "source_hidden": true }, "name": "Examine Events", "orderProperties": [], "tags": [], "title": "Examine Events" }, "outputs": [], "source": [ "import re\n", "\n", "\"\"\"\n", "This Custom Action searches Known errors in the podEvents variable.\n", "The well known errors are listed in the error_msgs variable. If\n", "there is a new error message that was found, you can add it to this\n", "list and the next run, the runbook will catch that error.\n", "\"\"\"\n", "\n", "def check_msg(msg, err):\n", " return re.search(err, msg)\n", "\n", "error_msgs = [\"repository (.*) does not exist or no pull access\",\n", " \"manifest for (.*) not found\",\n", " \"pull access denied, repository does not exist or may require authorization\",\n", " \"Back-off pulling image (.*)\"]\n", "cause_found = False\n", "result = ''\n", "for key, msg in describe_output.items():\n", " for err in error_msgs:\n", " result = check_msg(msg, err)\n", " if result is not None:\n", " print(\"PROBABLE CAUSE: \", f\"{result.string}\")\n", " cause_found = True\n", "\n", "repoLocation = ''\n", "if cause_found is False:\n", " print(\"ERROR MESSAGE : \\n\", all_describe_info)\n", "else:\n", " try:\n", " repoLocation = result.groups()[0]\n", " except:\n", " pass\n", " else:\n", " print(\"Image Repo Location : \", repoLocation)" ] }, { "cell_type": "markdown", "id": "83081ee6-55c1-4f82-923b-ed6c4e054d35", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 3", "orderProperties": [], "tags": [], "title": "Step 3" }, "source": [ "From the output from Step 2B\ud83d\udc46check if the repoLocation is accessible.
" ] }, { "cell_type": "code", "execution_count": 18, "id": "d3fbe0a1-6669-490f-8ffc-3e4e11a32156", "metadata": { "credentialsJson": {}, "execution_data": { "last_date_success_run_cell": "2023-08-01T23:40:25.833Z" }, "jupyter": { "source_hidden": true }, "name": "Check Registry Accessibility", "orderProperties": [], "tags": [], "title": "Check Registry Accessibility" }, "outputs": [], "source": [ "from IPython.display import Markdown as md\n", "\n", "if repoLocation is not None:\n", " display(md(f\"**Please verify Repo {repoLocation} is accessible from the K8S POD**\"))" ] }, { "cell_type": "code", "execution_count": 19, "id": "e68888fe-d002-49af-9196-cebc01594dbc", "metadata": { "credentialsJson": {}, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-08-01T23:40:29.611Z" }, "jupyter": { "source_hidden": true }, "name": "Video 3: Click Run Action -->", "orderProperties": [], "tags": [], "title": "Video 3: Click Run Action -->" }, "outputs": [], "source": [ "%%html\n", "" ] }, { "cell_type": "markdown", "id": "094afbe4-6ea9-4c02-883b-55c4754422c8", "metadata": { "jupyter": { "source_hidden": false }, "name": "Here is the Code for Step 3a, 3b, 3c", "orderProperties": [], "tags": [], "title": "Here is the Code for Step 3a, 3b, 3c" }, "source": [ "Step 3a: Add this to an Action (add -> Action)
\n", "patchCommand= \"kubectl patch pod image-pullback -n \" + namespace + ' -p \\'{\"spec\":{\"containers\":[{\"name\":\"image-pullback-container\", \"image\":\"debian\"}]}}\\''
\n", "\n", "
Step 3b: Search actions on the Right menu for \"Kubectl Command.\" Drag this action in, add your K8s credentials.
\n", "\n", "
Add this to the Kubectl Command
\n", "patchCommand
\n", "\n", "
Step 3c:
\n", "Drag in a second \"Kubectl Command\" action, add your K8s credentials.
\n", "Add this to the Kubectl Command:
\n", "f'kubectl get pods -n {namespace}'
\n", "" ] } ], "metadata": { "execution_data": { "runbook_name": "k8s: Pod Stuck in ImagePullBackOff State using genAI", "parameters": [ "environment", "namespace" ] }, "kernelspec": { "display_name": "unSkript (Build: 1248)", "name": "python_kubernetes" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3" }, "outputParameterSchema": null, "parameterSchema": { "definitions": null, "properties": { "environment": { "default": "", "description": "Name of the environment, associated with the credential", "title": "environment", "type": "string" }, "namespace": { "default": "0bb055c9-1d76-4570-a173-54eefecc7e42", "description": "K8S Namespace", "title": "namespace", "type": "string" } }, "required": [ "namespace", "environment" ], "title": "Schema", "type": "object" }, "parameterValues": null }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Kubernetes/K8S_Pod_Stuck_In_ImagePullBackOff_State_with_genai.json ================================================ { "name": "k8s: Pod Stuck in ImagePullBackOff State using genAI", "description": "This runbook checks if any Pod(s) in ImagePullBackOff state in a given k8s namespace, using genAI. If it finds, it tries to find out the reason why the Pod(s) is in that state.", "uuid": "4ece5a97491d3df93e6a2ec483d1bc554ee484a6b5bc8d91f03775d961a5400b", "icon": "CONNECTOR_TYPE_K8S", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_K8S" ], "version": "1.0.0" } ================================================ FILE: Kubernetes/K8S_Pod_Stuck_In_Terminating_State.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "642c0464-7f6e-484f-ba43-bcd8d030f6f5", "metadata": { "jupyter": { "source_hidden": false }, "name": "Overview", "orderProperties": [], "tags": [], "title": "Overview" }, "source": [ "
\n", "
1) Get pods stuck in Terminating State
2) Check for finalizers
3) Get Node Information
4) Force-delete the pod
5) Check Resolution
6) Further steps
A Pod has been deleted but remains in Terminating Status
This can happen for either of the reasons:
\n", "1. Pod has a finalizer associated with it and that is not completing\n",
"2. The Pod is not responding to termination signals\n",
"\n",
"The output of kubectl get pods [PODNAME] -n [NAMESPACE] will show something like this:
NAME READY STATUS RESTARTS AGE\n",
"nginx-7ef9efa7cd-qasd2 1/1 Terminating 0 1h\n",
"\n",
"This custom action changes the type of namespace from None to String only if no namespace is given
" ] }, { "cell_type": "code", "execution_count": null, "id": "74e67045-d7e7-4116-8714-19d880552650", "metadata": { "collapsed": true, "customAction": true, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Convert namespace to String if empty", "orderProperties": [], "tags": [], "title": "Convert namespace to String if empty", "credentialsJson": {} }, "outputs": [], "source": [ "if namespace==None:\n", " namespace=''" ] }, { "cell_type": "markdown", "id": "fc9ce20e-22ae-49e8-b439-c189a902b2a4", "metadata": { "name": "Step 1", "orderProperties": [], "tags": [], "title": "Step 1" }, "source": [ "This action fetches a list of the pods in Terminating State. This action will consider namespace as all if no namespace is given.
\n", "\n", "This action takes the following parameters (Optional):
\n", "namespace
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "b2809d57-03b2-41a3-9b57-f544e4ac32fa", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "id": 55, "index": 55, "inputData": [ { "kubectl_command": { "constant": false, "value": "f\"kubectl get pods -n {namespace} | grep Terminating | cut -d' ' -f 1\"" } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Get pods stuck in Terminating State", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "terminatingPods", "output_name_enabled": true }, "printOutput": true, "tags": [ "k8s_kubectl_command" ], "title": "Get pods stuck in Terminating State", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def k8s_kubectl_command(handle, 
kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"f\\\\\"kubectl get pods -n {namespace} | grep Terminating | cut -d' ' -f 1\\\\\"\"\n", " }''')\n", "\n", "task.configure(outputName=\"terminatingPods\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "bb2f595c-18ab-4415-baba-5f7cac36d936", "metadata": { "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following ouput:
\n", "terminating_pods
This action checks for finalizer. If it does, their failure to complete may be the root cause.
\n", "\n", "\n", "This action takes the following parameters:
\n", "namespace, terminatingPods
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "3ae96e00-4f46-461a-afd9-2db939414f0a", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "checkEnabled": false, "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "id": 55, "index": 55, "inputData": [ { "kubectl_command": { "constant": false, "value": "f\"kubectl get pod -n {namespace} {terminatingPods.strip()} -o yaml | grep -A 1 finalizers\" " } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Check for Finalizer", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "finalizerOutput", "output_name_enabled": true }, "printOutput": true, "startcondition": "terminatingPods is not ''", "tags": [ "k8s_kubectl_command" ], "title": "Check for Finalizer", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", 
"\n", "@beartype\n", "def k8s_kubectl_command(handle, kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"f\\\\\"kubectl get pod -n {namespace} {terminatingPods.strip()} -o yaml | grep -A 1 finalizers\\\\\" \"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"terminatingPods is not ''\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"finalizerOutput\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "b3a1c250-a232-4bdc-8e9b-25e462871800", "metadata": { "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following ouput:
\n", "finalizerOutput
This action takes input from Step 2 and removes the finalizer if present
\n", "\n", "\n", "This action takes the following parameters:
\n", "namespace
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "9cea9d29-8443-4fd9-a6f2-90501fdb652c", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "checkEnabled": false, "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "id": 55, "index": 55, "inputData": [ { "kubectl_command": { "constant": false, "value": "f\"kubectl patch pod {terminatingPods.strip()}\" + \" -p '{\\\\\"metadata\\\\\":{\\\\\"finalizers\\\\\":null}}'\"" } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Remove finalizer if present", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "removeFinalizerOutput", "output_name_enabled": true }, "printOutput": true, "startcondition": "finalizerOutput is not ''", "tags": [ "k8s_kubectl_command" ], "title": "Remove finalizer if present", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " 
return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def k8s_kubectl_command(handle, kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"f\\\\\"kubectl patch pod {terminatingPods.strip()}\\\\\" + \\\\\" -p '{\\\\\\\\\\\\\"metadata\\\\\\\\\\\\\":{\\\\\\\\\\\\\"finalizers\\\\\\\\\\\\\":null}}'\\\\\"\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"finalizerOutput is not ''\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"removeFinalizerOutput\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "a813187c-9310-41fc-a4f8-e2d61781baea", "metadata": { "name": "Step 3", "orderProperties": [], "tags": [], "title": "Step 3" }, "source": [ "This action captures the following ouput:
\n", "finalizerOutput
This action gets the node's information to check for its status (Step 3A)
\n", "\n", "\n", "This action takes the following parameters:
\n", "namespace
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "e70ff97f-2147-4026-bd92-8fb1851b6ce6", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "id": 55, "index": 55, "inputData": [ { "kubectl_command": { "constant": false, "value": "f\"kubectl get pods {terminatingPods.strip()} -n {namespace} -o yaml | grep nodeName | tr -d \\\\\" \\\\\" | cut -d':' -f 2\" " } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Get Node Information", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "nodeName", "output_name_enabled": true }, "printOutput": true, "tags": [ "k8s_kubectl_command" ], "title": "Get Node Information", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def 
k8s_kubectl_command(handle, kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"f\\\\\"kubectl get pods {terminatingPods.strip()} -n {namespace} -o yaml | grep nodeName | tr -d \\\\\\\\\\\\\" \\\\\\\\\\\\\" | cut -d':' -f 2\\\\\" \"\n", " }''')\n", "task.configure(outputName=\"nodeName\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "fd26240c-3c7f-42a0-b021-94cfdab6bd6d", "metadata": { "name": "Ste 3A", "orderProperties": [], "tags": [], "title": "Ste 3A" }, "source": [ "This action captures the following ouput:
\n", "nodeName
This action gets the status of the node. It is possible that the node your pod(s) is/are running on has failed in some way. If all pods on a specific node are in a Terminating state, then this may be the issue.
\n", "\n", "This action takes the following parameters:
\n", "namespace
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "86992954-67e8-423a-a85e-6cfdbf933b99", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "checkEnabled": false, "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "id": 55, "index": 55, "inputData": [ { "kubectl_command": { "constant": false, "value": "f\"kubectl get pods -n {namespace} {nodeName} | grep \\\\\" Ready\\\\\" | cut -d' ' -f 1 | tr -d ' '\" " } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Check Node Status", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "nodeStatus", "output_name_enabled": true }, "printOutput": true, "startcondition": "nodeName is not ''", "tags": [ "k8s_kubectl_command" ], "title": "Check Node Status", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", 
"@beartype\n", "def k8s_kubectl_command(handle, kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"f\\\\\"kubectl get pods -n {namespace} {nodeName} | grep \\\\\\\\\\\\\" Ready\\\\\\\\\\\\\" | cut -d' ' -f 1 | tr -d ' '\\\\\" \"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"nodeName is not ''\",\n", " \"condition_result\": true\n", " }''')\n", "task.configure(outputName=\"nodeStatus\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "b9e8faf9-9ea6-4170-b7ae-76c74bfb012c", "metadata": { "name": "Step 4", "orderProperties": [], "tags": [], "title": "Step 4" }, "source": [ "This action captures the following ouput:
\n", "nodeStatus
This action force deletes a pod. The pod may not be terminating due to a process that is not responding to a signal. The exact reason will be context-specific and application dependent. Common causes include:
\n", "A tight loop in userspace code that does not allow for interrupt signals
\n", "A maintenance process (eg garbage collection) on the application runtime
\n", "\n", "\n", "This action takes the following parameters:
\n", "namespace
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "2b0381ff-38a0-4d0e-baaf-f53328af5c15", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "checkEnabled": false, "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "id": 55, "index": 55, "inputData": [ { "kubectl_command": { "constant": false, "value": "f\"kubectl delete pod {terminatingPods.strip()} -n {namespace} --now\"" } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Force-delete the Pod", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "printOutput": true, "startcondition": "terminatingPods is not ''", "tags": [ "k8s_kubectl_command" ], "title": "Force-delete the Pod", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def k8s_kubectl_command(handle, kubectl_command: str) -> str:\n", " 
\"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"f\\\\\"kubectl delete pod {terminatingPods.strip()} -n {namespace} --now\\\\\"\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"terminatingPods is not ''\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "e83f0bff-934f-4980-83b7-56ca3a4e62c3", "metadata": { "name": "Step 5", "orderProperties": [], "tags": [], "title": "Step 5" }, "source": [ "This action captures the following ouput:
\n", "nodeStatus
This action runs the get pods command and if the specific pod no longer shows up when running kubectl get pods, then the issue has been resolved.
\n", "\n", "This action takes the following parameters:
\n", "namespace
\n", "\n", "This action captures the following ouput:
\n", "checkResolution
" ] }, { "cell_type": "code", "execution_count": null, "id": "febcfe22-e455-4a69-951b-0084a36c5cf9", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "id": 55, "index": 55, "inputData": [ { "kubectl_command": { "constant": false, "value": "f\"kubectl get pods -n {namespace}\"" } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Check Resolution", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "checkResolution", "output_name_enabled": true }, "printOutput": true, "tags": [ "k8s_kubectl_command" ], "title": "Check Resolution", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def k8s_kubectl_command(handle, kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl 
command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"f\\\\\"kubectl get pods -n {namespace}\\\\\"\"\n", " }''')\n", "task.configure(outputName=\"checkResolution\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "2a340553-4bd7-42ef-ba15-2e3125b471f9", "metadata": { "jupyter": { "source_hidden": false }, "name": "Further Steps", "orderProperties": [], "tags": [], "title": "Further Steps" }, "source": [ "***\n", "If the POD still stuck in `Terminating` state then you can consider.\n", "\n", " 1. Restarting kubelet\n", " \n", " If you can SSH to the node and restart the kublet process. You may need\n", " administrator priveleges to do so. Before you do that, you may also want\n", " to check the kubelet logs for any issues.\n", " \n", " 2. Check Whether finalizer's work needs to get done before termination\n", " \n", " This will vary depending on what the finalizer is doing. 
Please refer to \n", " [Finalizers](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#finalizers). Common cases for finalizers not completing realtes to\n", " Volumes.\n", " \n", "***" ] }, { "cell_type": "markdown", "id": "f07e9fa6-01da-45f3-b195-97e9f89c9465", "metadata": { "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "
In this Runbook, we were able to identify pods stuck in Terminating State, removed the finalizer (if present) and tried force deletion of the pod, using unSkript's K8s actions. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "k8s: Pod Stuck in Terminating State", "parameters": [ "namespace" ] }, "kernelspec": { "display_name": "Python 3.9.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3", "version": "3.9.6" }, "parameterSchema": { "properties": { "namespace": { "description": "K8S Namespace", "title": "namespace", "type": "string" } }, "required": [ "namespace" ], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Kubernetes/K8S_Pod_Stuck_In_Terminating_State.json ================================================ { "name": "k8s: Pod Stuck in Terminating State", "description": "This runbook checks any Pods are in terminating state in a given k8s namespace. If it finds, it tries to recover it by resetting finalizers of the pod.", "uuid": "7108717393788c2d76687490938faffe5e6e2a46f05405f180e089a166761173", "icon": "CONNECTOR_TYPE_K8S", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_K8S" ], "version": "1.0.0" } ================================================ FILE: Kubernetes/README.md ================================================ # Kubernetes RunBooks * [k8s: Delete Evicted Pods From All Namespaces](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/Delete_Evicted_Pods_From_Namespaces.ipynb): This runbook shows and deletes the evicted pods for given namespace. If the user provides the namespace input, then it only collects pods for the given namespace; otherwise, it will select all pods from all the namespaces. 
* [k8s: Get kube system config map](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/Get_Kube_System_Config_Map.ipynb): This runbook fetches the kube system config map for a k8s cluster and publishes the information on a Slack channel. * [IP Exhaustion Mitigation: Failing K8s Pod Deletion from Jobs](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/K8S_Delete_Pods_From_Failing_Jobs.ipynb): Preventing IP exhaustion is critical in Kubernetes environments, and a key strategy is deleting failing pods from jobs. Failing pods can consume valuable IP resources, leading to scarcity and inefficiency. By proactively identifying and removing malfunctioning pods, administrators can promptly free up IP addresses, optimizing resource utilization. This approach ensures that IP allocation remains efficient, enabling the cluster to accommodate new pods without experiencing IP exhaustion. This runbook helps us to identify failing pods within jobs thereby maximizing IP availability for other pods and services. * [k8s: Get candidate nodes for given configuration](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/K8S_Get_Candidate_Nodes_Given_Config.ipynb): This runbook get the matching nodes for a given configuration (storage, cpu, memory, pod_limit) from a k8s cluster * [Kubernetes Log Healthcheck](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/K8S_Log_Healthcheck.ipynb): This RunBook checks the logs of every pod in a namespace for warning messages. * [k8s: Pod Stuck in CrashLoopBackoff State](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/K8S_Pod_Stuck_In_CrashLoopBack_State.ipynb): This runbook checks if any Pod(s) in CrashLoopBackoff state in a given k8s namespace. If it finds, it tries to find out the reason why the Pod(s) is in that state. 
* [k8s: Pod Stuck in ImagePullBackOff State](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/K8S_Pod_Stuck_In_ImagePullBackOff_State.ipynb): This runbook checks if any Pod(s) in ImagePullBackOff state in a given k8s namespace. If it finds, it tries to find out the reason why the Pod(s) is in that state. * [k8s: Pod Stuck in ImagePullBackOff State using genAI](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/K8S_Pod_Stuck_In_ImagePullBackOff_State_with_genai.ipynb): This runbook checks if any Pod(s) in ImagePullBackOff state in a given k8s namespace, using genAI. If it finds, it tries to find out the reason why the Pod(s) is in that state. * [k8s: Pod Stuck in Terminating State](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/K8S_Pod_Stuck_In_Terminating_State.ipynb): This runbook checks any Pods are in terminating state in a given k8s namespace. If it finds, it tries to recover it by resetting finalizers of the pod. * [k8s: Resize List of PVCs](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/Resize_List_of_PVCs.ipynb): This runbook resizes a list of Kubernetes PVCs. * [k8s: Resize PVC](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/Resize_PVC.ipynb): This runbook resizes a Kubernetes PVC. 
* [Rollback Kubernetes Deployment](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/Rollback_k8s_Deployment_and_Update_Jira.ipynb): This runbook can be used to rollback Kubernetes Deployment # Kubernetes Actions * [Add Node in a Kubernetes Cluster](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_add_node_to_cluster/README.md): Add Node in a Kubernetes Cluster * [Change size of Kubernetes PVC](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_change_pvc_size/README.md): Change size of Kubernetes PVC * [Check K8s services endpoint health](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_check_service_status/README.md): This action checks the health status of the provided Kubernetes services. * [Check K8s worker CPU Utilization](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_check_worker_cpu_utilization/README.md): Retrieves the CPU utilization for all worker nodes in the cluster and compares it to a given threshold. 
* [Delete a Kubernetes POD in a given Namespace](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_delete_pod/README.md): Delete a Kubernetes POD in a given Namespace * [Describe Kubernetes Node](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_describe_node/README.md): Describe a Kubernetes Node * [Describe a Kubernetes POD in a given Namespace](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_describe_pod/README.md): Describe a Kubernetes POD in a given Namespace * [Execute a command on a Kubernetes POD in a given Namespace](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_exec_command_on_pod/README.md): Execute a command on a Kubernetes POD in a given Namespace * [Kubernetes Execute a command on a POD in a given namespace and filter](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_exec_command_on_pods_and_filter/README.md): Execute a command on Kubernetes POD in a given namespace and filter output * [Execute local script on a pod](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_execute_local_script_on_a_pod/README.md): Execute local script on a pod in a namespace * [Gather Data for POD Troubleshoot](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_gather_data_for_pod_troubleshoot/README.md): Gather Data for POD Troubleshoot * [Gather Data for K8S Service Troubleshoot](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_gather_data_for_service_troubleshoot/README.md): Gather Data for K8S Service Troubleshoot * [Get All Evicted PODS From Namespace](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_all_evicted_pods_from_namespace/README.md): This action get all evicted PODS from given namespace. 
If namespace not given it will get all the pods from all namespaces. * [ Get All Kubernetes PODS with state in a given Namespace](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_all_pods/README.md): Get All Kubernetes PODS with state in a given Namespace * [Get K8s pods status and resource utilization info](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_all_resources_utilization_info/README.md): This action gets the pod status and resource utilization of various Kubernetes resources like jobs, services, persistent volumes. * [Get candidate k8s nodes for given configuration](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_candidate_nodes_for_pods/README.md): Get candidate k8s nodes for given configuration * [Get K8S Cluster Health](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_cluster_health/README.md): Get K8S Cluster Health * [Get k8s kube system config map](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_config_map_kube_system/README.md): Get k8s kube system config map * [Get Kubernetes Deployment For a Pod in a Namespace](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_deployment/README.md): Get Kubernetes Deployment for a POD in a Namespace * [Get Deployment Status](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_deployment_status/README.md): This action search for failed deployment status and returns list. 
* [Get Kubernetes Error PODs from All Jobs](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_error_pods_from_all_jobs/README.md): Get Kubernetes Error PODs from All Jobs * [Get expiring K8s certificates](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_expiring_certificates/README.md): Get the expiring certificates for a K8s cluster. * [Get Kubernetes Failed Deployments](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_failed_deployments/README.md): Get Kubernetes Failed Deployments * [Get frequently restarting K8s pods](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_frequently_restarting_pods/README.md): Get Kubernetes pods from all namespaces that are restarting too often. * [Get Kubernetes Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_handle/README.md): Get Kubernetes Handle * [Get All Kubernetes Healthy PODS in a given Namespace](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_healthy_pods/README.md): Get All Kubernetes Healthy PODS in a given Namespace * [Get memory utilization for K8s services](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_memory_utilization_of_services/README.md): This action executes the given kubectl commands to find the memory utilization of the specified services in a particular namespace and compares it with a given threshold. * [Get K8s node status and CPU utilization](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_node_status_and_resource_utilization/README.md): This action gathers Kubernetes node status and resource utilization information. 
* [Get Kubernetes Nodes](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_nodes/README.md): Get Kubernetes Nodes * [Get K8s nodes disk and memory pressure](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_nodes_pressure/README.md): This action fetches the memory and disk pressure status of each node in the cluster * [Get Kubernetes Nodes that have insufficient resources](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_nodes_with_insufficient_resources/README.md): Get Kubernetes Nodes that have insufficient resources * [Get K8s offline nodes](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_offline_nodes/README.md): This action checks if any node in the Kubernetes cluster is offline. * [Get K8S OOMKilled Pods](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_oomkilled_pods/README.md): Get K8S Pods which are OOMKilled from the container last states. * [Get K8s get pending pods](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_pending_pods/README.md): This action checks if any pod in the Kubernetes cluster is in 'Pending' status. 
* [Get Kubernetes POD Configuration](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_pod_config/README.md): Get Kubernetes POD Configuration * [Get Kubernetes Logs for a given POD in a Namespace](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_pod_logs/README.md): Get Kubernetes Logs for a given POD in a Namespace * [Get Kubernetes Logs for a list of PODs & Filter in a Namespace](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_pod_logs_and_filter/README.md): Get Kubernetes Logs for a list of PODs and Filter in a Namespace * [Get Kubernetes Status for a POD in a given Namespace](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_pod_status/README.md): Get Kubernetes Status for a POD in a given Namespace * [Get pods attached to Kubernetes PVC](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_pods_attached_to_pvc/README.md): Get pods attached to Kubernetes PVC * [Get all K8s Pods in CrashLoopBackOff State](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_pods_in_crashloopbackoff_state/README.md): Get all K8s pods in CrashLoopBackOff State * [Get all K8s Pods in ImagePullBackOff State](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_pods_in_imagepullbackoff_state/README.md): Get all K8s pods in ImagePullBackOff State * [Get Kubernetes PODs in not Running State](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_pods_in_not_running_state/README.md): Get Kubernetes PODs in not Running State * [Get all K8s Pods in Terminating State](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_pods_in_terminating_state/README.md): Get all K8s pods in Terminating State * [Get Kubernetes PODS with 
high restart](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_pods_with_high_restart/README.md): Get Kubernetes PODS with high restart * [Get K8S Service with no associated endpoints](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_service_with_no_associated_endpoints/README.md): Get K8S Service with no associated endpoints * [Get Kubernetes Services for a given Namespace](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_services/README.md): Get Kubernetes Services for a given Namespace * [Get Kubernetes Unbound PVCs](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_get_unbound_pvcs/README.md): Get Kubernetes Unbound PVCs * [Kubectl command](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_command/README.md): Execute kubectl command. * [Kubectl set context entry in kubeconfig](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_config_set_context/README.md): Kubectl set context entry in kubeconfig * [Kubectl display merged kubeconfig settings](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_config_view/README.md): Kubectl display merged kubeconfig settings * [Kubectl delete a pod](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_delete_pod/README.md): Kubectl delete a pod * [Kubectl describe a node](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_describe_node/README.md): Kubectl describe a node * [Kubectl describe a pod](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_describe_pod/README.md): Kubectl describe a pod * [Kubectl drain a 
node](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_drain_node/README.md): Kubectl drain a node * [Execute command on a pod](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_exec_command/README.md): Execute command on a pod * [Kubectl get api resources](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_get_api_resources/README.md): Kubectl get api resources * [Kubectl get logs](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_get_logs/README.md): Kubectl get logs for a given pod * [Kubectl get services](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_get_service_namespace/README.md): Kubectl get services in a given namespace * [Kubectl list pods](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_list_pods/README.md): Kubectl list pods in given namespace * [Kubectl update field](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_patch_pod/README.md): Kubectl update field of a resource using strategic merge patch * [Kubectl rollout deployment history](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_rollout_deployment/README.md): Kubectl rollout deployment history * [Kubectl scale deployment](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_scale_deployment/README.md): Kubectl scale a given deployment * [Kubectl show metrics](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_show_metrics_node/README.md): Kubectl show metrics for a given node * [Kubectl show metrics](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_kubectl_show_metrics_pod/README.md): Kubectl 
show metrics for a given pod * [List matching name pods](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_list_all_matching_pods/README.md): List all pods matching a particular name string. The matching string can be a regular expression too * [List pvcs](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_list_pvcs/README.md): List pvcs by namespace. By default, it will list all pvcs in all namespaces. * [Remove POD from Deployment](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_remove_pod_from_deployment/README.md): Remove POD from Deployment * [Update Commands in a Kubernetes POD in a given Namespace](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Kubernetes/legos/k8s_update_command_in_pod_spec/README.md): Update Commands in a Kubernetes POD in a given Namespace ================================================ FILE: Kubernetes/Resize_List_of_PVCs.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "16182e50-b995-4f61-a140-30c3f4902837", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) List PVCs
2) Call Resize PVC Runbook
This action fetches a list of PVCs
\n", "\n", "\n", "This action takes the following parameters:
\n", "Namespace(optional)
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "c6d4498e-8d97-4790-87ff-090a7846ccd6", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "0c96676c124796bc48e751c641ea0ccc722e7d29f1ffe665fe756a7106d756c5", "checkEnabled": false, "collapsed": true, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "List pvcs by namespace. By default, it will list all pvcs in all namespaces.", "id": 48, "index": 48, "inputschema": [ { "properties": { "namespace": { "default": "", "description": "Kubernetes namespace", "title": "Namespace", "type": "string" } }, "title": "k8s_list_pvcs", "type": "object" } ], "isUnskript": false, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "List pvcs", "nouns": [], "orderProperties": [ "namespace" ], "output": { "type": "" }, "outputParams": { "output_name": "pvcsList", "output_name_enabled": true }, "printOutput": true, "tags": [ "k8s_list_pvcs" ], "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2021 unSkript.com\n", "# All rights reserved.\n", "#\n", "import pprint\n", "from pydantic import BaseModel, Field\n", "from typing import Optional, List, Tuple\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_list_pvcs_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "@beartype\n", "def k8s_list_pvcs(handle, namespace: str = '') -> List:\n", " \"\"\"k8s_list_pvcs list pvcs\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type namespace: str\n", " :param 
namespace: Kubernetes namespace.\n", "\n", " :rtype: List\n", " \"\"\"\n", " if namespace == '':\n", " kubectl_command = 'kubectl get pvc -A --output=jsonpath=\\'{range .items[*]}{@.metadata.namespace}{\",\"}{@.metadata.name}{\"\\\\n\"}{end}\\''\n", " else:\n", " kubectl_command = 'kubectl get pvc -n ' + namespace + ' --output=jsonpath=\\'{range .items[*]}{@.metadata.namespace}{\",\"}{@.metadata.name}{\"\\\\n\"}{end}\\''\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return []\n", " names_list = [y for y in (x.strip() for x in result.stdout.splitlines()) if y]\n", " output = []\n", " for i in names_list:\n", " ns, name = i.split(\",\")\n", " output.append({\"Namespace\": ns, \"Name\":name})\n", " return output\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(outputName=\"pvcsList\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_list_pvcs, lego_printer=k8s_list_pvcs_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "e344354c-6a1b-4622-b83a-3f8cefb5791d", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 1A", "orderProperties": [], "tags": [], "title": "Step 1A" }, "source": [ "This action captures the following ouput:
\n", "pvcsList
Convert Value to float
This custom action makes an API call to the resize PVC runbook with the list of PVCs obtained from Step 1.
" ] }, { "cell_type": "code", "execution_count": 21, "id": "46616499-6e96-462c-b9fc-b16b2538d6b2", "metadata": { "actionNeedsCredential": false, "actionSupportsIteration": true, "actionSupportsPoll": true, "jupyter": { "source_hidden": true }, "name": "Call Resize PVC runbook", "orderProperties": [], "tags": [], "title": "Call Resize PVC runbook", "credentialsJson": {} }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "from pydantic import BaseModel, Field\n", "from unskript.connectors.infra import InfraConnector\n", "from typing import Optional\n", "import requests\n", "from polling2 import poll_decorator\n", "import html_to_json\n", "import uuid \n", "\n", "class Schema(BaseModel):\n", " Namespace: Optional[str] = Field(\n", " None, description='Namespace of the PVC', title='Namespace'\n", " )\n", " PVCName: Optional[str] = Field(None, description='Name of the PVC', title='PVCName')\n", " ResizeOption: Optional[str] = Field(\n", " 'Add',\n", " description='Option to resize the volume. 2 options supported: 1. Add - Use this option to resize by an amount. 2. Multiple - Use this option if you want to resize by a multiple of the current volume size.',\n", " title='ResizeOption',\n", " )\n", " RestartPodsAfterResize: Optional[bool] = Field(\n", " False,\n", " description='Restart the pods after PVC resize',\n", " title='RestartPodsAfterResize',\n", " )\n", " Channel: Optional[str] = Field(\n", " None,\n", " description='Slack Channel name where notification will be send.',\n", " title='SlackChannelName',\n", " )\n", " Value: Optional[float] = Field(\n", " None,\n", " description='Based on the resize option chosen, specify the value. For eg, if you chose Add option, this value will be a value in Gb (like 100). If you chose, this value will be a multiplying factor to the current volume size. 
For eg, to double, specify value as 2.',\n", " title='Value',\n", " )\n", "\n", "@poll_decorator(step=10, timeout=60, check_success=lambda x: x is True)\n", "def checkExecutionStatus(handle, tenantID, executionID) -> bool:\n", " print(f'Checking execution status')\n", " url = f'{env[\"TENANT_URL\"]}/executions/{executionID}'\n", " try:\n", " resp = handle.request('GET', url, params={'tenant_id': tenantID, \"summary\": True})\n", " resp.raise_for_status()\n", " except Exception as e:\n", " print(f'Get execution {executionID} failed, {e}')\n", " return False\n", "\n", " try:\n", " result = resp.json()\n", " except Exception:\n", " result = html_to_json.convert(resp.content)\n", " if result['execution']['executionStatus'] == \"EXECUTION_STATUS_SUCCEEDED\" or result['execution']['executionStatus'] == \"EXECUTION_STATUS_FAILED\":\n", " return True\n", " else:\n", " return False\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def call_pvc_resize_runbook(handle: InfraConnector, Namespace: str, PVCName: str, ResizeOption: str, RestartPodsAfterResize:bool, Value: float, Channel: str = None):\n", " workflowIDToBeCalled = RunbookID\n", " apiToken = APIToken\n", " tenantID = env['TENANT_ID']\n", " environmentID = env['ENVIRONMENT_ID']\n", " userID = \"Bot-user\"\n", "\n", " params = Schema()\n", " params.Namespace = Namespace\n", " params.PVCName = PVCName\n", " params.Value = Value\n", " params.ResizeOption = ResizeOption\n", " params.Channel = Channel\n", " payload = {\n", " \"req_hdr\": {\n", " \"tid\": str(uuid.uuid4())\n", " },\n", " \"tenant_id\": tenantID,\n", " \"environment_id\": environmentID,\n", " \"user_id\": userID,\n", " \"params\": params.json()\n", " }\n", " handle = requests.Session()\n", " authHeader = f'unskript-sha {apiToken}'\n", " handle.headers.update({'Authorization': authHeader})\n", " url = f'{env[\"TENANT_URL\"]}/workflows/{workflowIDToBeCalled}/run'\n", "\n", " try:\n", " resp = handle.request('POST', url, json=payload)\n", " 
resp.raise_for_status()\n", " except Exception as e:\n", " print(f'Workflow run failed, {e}')\n", " raise e\n", "\n", " try:\n", " result = resp.json()\n", " except Exception:\n", " result = html_to_json.convert(resp.content)\n", "\n", " executionID = result['executionId']\n", " print(f'ExecutionID {executionID}')\n", "\n", " try:\n", " checkExecutionStatus(handle, tenantID, executionID)\n", " except Exception as e:\n", " handle.close()\n", " print(f'Check execution status for {executionID} failed, {e}')\n", " raise e\n", "\n", " handle.close()\n", " return\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"Namespace\": {\n", " \"constant\": false,\n", " \"value\": \"iter.get(\\\\\\\"Namespace\\\\\\\")\"\n", " },\n", " \"PVCName\": {\n", " \"constant\": false,\n", " \"value\": \"iter.get(\\\\\\\"Name\\\\\\\")\"\n", " },\n", " \"ResizeOption\": {\n", " \"constant\": false,\n", " \"value\": \"ResizeOption\"\n", " },\n", " \"RestartPodsAfterResize\": {\n", " \"constant\": true,\n", " \"value\": false\n", " },\n", " \"Channel\": {\n", " \"constant\": false,\n", " \"value\": \"Channel\"\n", " },\n", " \"Value\": {\n", " \"constant\": false,\n", " \"value\": \"Value\"\n", " }\n", "}''')\n", "task.configure(iterJson='''{\n", " \"iter_enabled\": true,\n", " \"iter_list_is_const\": false,\n", " \"iter_list\": \"pvcsList\",\n", " \"iter_parameter\": [\n", " \"Namespace\",\n", " \"PVCName\"\n", " ]\n", "}''')\n", "\n", "\n", "\n", "(err, hdl, args) = task.validate(vars=vars(), infra=True)\n", "if err is None:\n", " task.output = task.execute(call_pvc_resize_runbook, hdl, args)\n", "if hasattr(task, 'output'):\n", " if isinstance(task.output, (list, tuple)):\n", " for item in task.output:\n", " print(f'item: {item}')\n", " elif isinstance(task.output, dict):\n", " for item in task.output.items():\n", " print(f'item: {item}')\n", " else:\n", " print(task.output)" ] }, { "cell_type": "markdown", "id": "fafb82e0-b73e-487b-8cb7-a987b59b5902", 
"metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "In this Runbook, we were able successfully resize a list of PVCs using unSkript's K8s actions and making an API call to the resize PVC runbook. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "k8s: Resize List of PVCs", "parameters": [ "ResizeOption", "RunbookID", "Value", "APIToken", "Channel" ] }, "kernelspec": { "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "APIToken": { "description": "APIToken to talk to unskript apis", "title": "APIToken", "type": "string" }, "Channel": { "description": "Slack Channel name where notification will be send", "title": "Channel", "type": "string" }, "ResizeOption": { "default": "Add", "description": "Option to resize the volume. 2 options supported: 1. Add - Use this option to resize by an amount. 2. Multiple - Use this option if you want to resize by a multiple of the current volume size.", "title": "ResizeOption", "type": "string" }, "RunbookID": { "default": "b8385df9545bdb5695af879d7d089571fed148d996cf4b7e9e7848502e2cc029", "description": "UUID of the PVC Resize runbook", "title": "RunbookID", "type": "string" }, "Value": { "description": "Based on the resize option chosen, specify the value. For eg, if you chose Add option, this value will be a value in Gb (like 100). If you chose, this value will be a multiplying factor to the current volume size. 
For eg, to double, specify value as 2.", "title": "Value", "type": "number" } }, "required": [ "APIToken", "Value" ], "title": "Schema", "type": "object" }, "parameterValues": null, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Kubernetes/Resize_List_of_PVCs.json ================================================ { "name": "k8s: Resize List of PVCs", "description": "This runbook resizes a list of Kubernetes PVCs.", "uuid": "40df55f0b809c1f77b7c5c5c106fc534f58b7eb93ac92993723e9798631e7359", "icon": "CONNECTOR_TYPE_K8S", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_K8S" ], "version": "1.0.0" } ================================================ FILE: Kubernetes/Resize_PVC.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "9ac20689-a687-4867-a035-676d8b5c46bf", "metadata": { "jupyter": { "source_hidden": false }, "name": "Steps Overview", "orderProperties": [], "tags": [], "title": "Steps Overview" }, "source": [ "\n", "
1) Get Storage class of PVC
2) Get Storage Class details
3) Change size of PVC
4) Restart the pod
5) Execute a command on a Kubernetes POD
6) Post Slack message
\n", "
This action fetches storage class for PVC to execute Step 2\ud83d\udc47
\n", "\n", "\n", "This action takes the following parameters:
\n", "PVCName, Namespace
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "3b6aa5ee-f63b-4018-9467-068572ddef93", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "id": 55, "index": 55, "inputData": [ { "kubectl_command": { "constant": false, "value": "f\"kubectl get pvc {PVCName} -n {Namespace} --output=jsonpath={{.spec.storageClassName}}\"" } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Get Storage Class for the PVC", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "storageClass", "output_name_enabled": true }, "printOutput": true, "tags": [ "k8s_kubectl_command" ], "title": "Get Storage Class for the PVC", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def k8s_kubectl_command(handle, 
kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"f\\\\\"kubectl get pvc {PVCName} -n {Namespace} --output=jsonpath={{.spec.storageClassName}}\\\\\"\"\n", " }''')\n", "task.configure(outputName=\"storageClass\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "67898de3-7c41-4e9c-abb2-2c8086b19ad9", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2", "orderProperties": [], "tags": [], "title": "Step 2" }, "source": [ "This action captures the following ouput:
\n", "storageClass
This action fetches storage class details for PVC
\n", "\n", "\n", "This action takes the following parameters:
\n", "storageClass
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "6e01bd2b-8a37-4579-bb64-273e921b3712", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "id": 55, "index": 55, "inputData": [ { "kubectl_command": { "constant": false, "value": "f\"kubectl get sc {storageClass} --output=jsonpath={{.allowVolumeExpansion}}\"" } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Get Storage class details", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "allowVolumeExpansion", "output_name_enabled": true }, "printOutput": true, "tags": [ "k8s_kubectl_command" ], "title": "Get Storage class details", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def k8s_kubectl_command(handle, kubectl_command: str) 
-> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"f\\\\\"kubectl get sc {storageClass} --output=jsonpath={{.allowVolumeExpansion}}\\\\\"\"\n", " }''')\n", "task.configure(outputName=\"allowVolumeExpansion\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "184efdc8-9c65-4e52-a49b-c9528dff6f94", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 2A", "orderProperties": [], "tags": [], "title": "Step 2A" }, "source": [ "This action captures the following ouput:
\n", "allowVolumeExpansion
This action verifies that allowVolumeExpansion is enabled for the storage class for the PVC. Assert if its not enabled.
\n", "\n", "\n", "This action takes the following parameters:
\n", "None
\n", "" ] }, { "cell_type": "code", "execution_count": 6, "id": "17593357-01c4-4219-be83-06bafebbb0e6", "metadata": { "actionNeedsCredential": false, "actionSupportsIteration": false, "actionSupportsPoll": false, "execution_data": { "last_date_success_run_cell": "2022-07-06T07:54:16.872Z" }, "jupyter": { "source_hidden": true }, "orderProperties": [], "tags": [], "title": "Check if storage class has allowVolumeExpansion enabled", "credentialsJson": {} }, "outputs": [], "source": [ "if allowVolumeExpansion == \"\" or allowVolumeExpansion is False:\n", " print(f'allowVolumeExpansion disabled for storage class {storageClass}, exiting')\n", " assert(f'allowVolumeExpansion disabled for storage class {storageClass}')\n", "else:\n", " print(f'allowVolumeExpansion enabled for storage class {storageClass}')" ] }, { "cell_type": "markdown", "id": "a15d0df5-5123-48a8-9785-ba47c786961c", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 3", "orderProperties": [], "tags": [], "title": "Step 3" }, "source": [ "This action captures the following ouput:
\n", "None
This action increases the PVC Volume by the provided Value depending upon ResizeOption chosen.
\n", "\n", "\n", "This action takes the following parameters:
\n", "namespace, PVCName, value
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "d68afc44-f6b0-4e23-8a36-26f2f48bc04b", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "c82954c96e797711613cd6b0ef8c6ab45a6af26f191115df128396bb056450d2", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Change size of Kubernetes PVC", "id": 32, "index": 32, "inputData": [ { "name": { "constant": false, "value": "PVCName" }, "namespace": { "constant": false, "value": "Namespace" }, "resize_option": { "constant": true, "value": "Add" }, "resize_value": { "constant": false, "value": "Value" } } ], "inputschema": [ { "definitions": { "SizingOption": { "description": "An enumeration.", "enum": [ "Add", "Multiple" ], "title": "SizingOption", "type": "string" } }, "properties": { "name": { "description": "Name of the PVC.", "title": "PVC Name", "type": "string" }, "namespace": { "description": "Namespace of the PVC.", "title": "Namespace", "type": "string" }, "resize_option": { "allOf": [ { "$ref": "#/definitions/SizingOption" } ], "default": "\"Add\"", "description": "\n Option to resize the volume. 2 options supported:\n 1. Add - Use this option to resize by an amount.\n 2. Multiple - Use this option if you want to resize by a multiple of the current volume size.\n ", "title": "Resize option" }, "resize_value": { "description": "\n Based on the resize option chosen, specify the value. For eg, if you chose Add option, this\n value will be a value in Gi (like 100). If you chose Multiple option, this value will be a multiplying factor\n to the current volume size. 
So, if you want to double, you specify 2 here.\n ", "title": "Value", "type": "number" } }, "required": [ "namespace", "name", "resize_value" ], "title": "k8s_change_pvc_size", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Change size of Kubernetes PVC", "nouns": [], "orderProperties": [ "namespace", "name", "resize_option", "resize_value" ], "output": { "type": "" }, "printOutput": true, "tags": [ "k8s_change_pvc_size" ], "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "from typing import Optional\n", "from unskript.enums.aws_k8s_enums import SizingOption\n", "import pprint\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_change_pvc_size_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "\n", "@beartype\n", "def k8s_change_pvc_size(handle, namespace: str, name: str, resize_option: SizingOption, resize_value: float) -> str:\n", " \"\"\"k8s_change_pvc_size change pvc size\n", "\n", " :type name: str\n", " :param name: Name of the PVC.\n", "\n", " :type resize_option: SizingOption\n", " :param resize_option: Option to resize the volume.\n", "\n", " :type resize_value: float\n", " :param resize_value: Based on the resize option chosen, specify the value.\n", "\n", " :type namespace: str\n", " :param namespace: Namespace of the PVC.\n", "\n", " :rtype: string\n", " \"\"\"\n", " # Get the current size.\n", " kubectl_command = f'kubectl get pvc {name} -n {namespace} -o jsonpath={{.status.capacity.storage}}'\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result.stderr:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str(f\"Error Changing PVC Size {kubectl_command}: {result.stderr}\")\n", "\n", " currentSize = 
result.stdout\n", " currentSizeInt = int(currentSize.rstrip(\"Gi\"))\n", " if resize_option == SizingOption.Add:\n", " newSizeInt = currentSizeInt + resize_value\n", " else:\n", " newSizeInt = currentSizeInt * resize_value\n", " newSize = str(newSizeInt) + \"Gi\"\n", " print(f'Current size {currentSize}, new Size {newSize}')\n", " kubectl_command = f'kubectl patch pvc {name} -n {namespace} -p \\'{{\"spec\":{{\"resources\":{{\"requests\": {{\"storage\": \"{newSize}\"}}}}}}}}\\''\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result.stderr:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str(f\"Error Changing PVC Size {kubectl_command}: {result.stderr}\")\n", " print(f'PVC {name} size changed to {newSize} successfully')\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"name\": \"PVCName\",\n", " \"namespace\": \"Namespace\",\n", " \"resize_option\": \"SizingOption.Add\",\n", " \"resize_value\": \"float(Value)\"\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_change_pvc_size, lego_printer=k8s_change_pvc_size_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "e9ad51be-bee4-4b23-800d-ae5d17af5455", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 4A", "orderProperties": [], "tags": [], "title": "Step 4A" }, "source": [ "This action captures the following ouput:
\n", "None
This action gets the pods attached to a Kuberneted PVC
\n", "\n", "\n", "This action takes the following parameters:
\n", "namespace, PVCName
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "7782fc86-9231-4e8e-bdc7-cf133b7b8967", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "eedf20eddc44193edbda5e7df1810485ae415f496aebb77edbd995f7901602ee", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Get pods attached to Kubernetes PVC", "id": 62, "index": 62, "inputData": [ { "namespace": { "constant": false, "value": "Namespace" }, "pvc": { "constant": false, "value": "PVCName" } } ], "inputschema": [ { "properties": { "namespace": { "description": "Namespace of the PVC.", "title": "Namespace", "type": "string" }, "pvc": { "description": "Name of the PVC.", "title": "PVC Name", "type": "string" } }, "required": [ "namespace", "pvc" ], "title": "k8s_get_pods_attached_to_pvc", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Get pods attached to Kubernetes PVC", "nouns": [], "orderProperties": [ "namespace", "pvc" ], "output": { "type": "" }, "outputParams": { "output_name": "podName", "output_name_enabled": true }, "printOutput": true, "tags": [ "k8s_get_pods_attached_to_pvc" ], "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2021 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_get_pods_attached_to_pvc_printer(output):\n", " if output is None:\n", " return\n", "\n", " print(output)\n", "\n", "\n", "\n", "@beartype\n", "def k8s_get_pods_attached_to_pvc(handle, 
namespace: str, pvc: str) -> str:\n", " \"\"\"k8s_get_pods_attached_to_pvc get pods attached to pvc\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type namespace: str\n", " :param namespace: Namespace of the PVC.\n", "\n", " :type pvc: str\n", " :param pvc: Name of the PVC.\n", "\n", " :rtype: string\n", " \"\"\"\n", " kubectl_command = f\"kubectl describe pvc {pvc} -n {namespace} | awk \\'/Used By/ {{print $3}}\\'\"\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result.stderr:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"namespace\": \"Namespace\",\n", " \"pvc\": \"PVCName\"\n", " }''')\n", "task.configure(outputName=\"podName\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_get_pods_attached_to_pvc, lego_printer=k8s_get_pods_attached_to_pvc_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "c1171c52-a5dc-4a71-bc70-6203b0a194c3", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 4B", "orderProperties": [], "tags": [], "title": "Step 4B" }, "source": [ "This action captures the following ouput:
\n", "podName
This action simply extracts the pod name attached to a Kuberneted PVC
" ] }, { "cell_type": "code", "execution_count": null, "id": "e066a114-5445-45b0-a64d-e1c32c5bd37b", "metadata": { "actionNeedsCredential": false, "actionSupportsIteration": false, "actionSupportsPoll": false, "execution_data": { "last_date_success_run_cell": "2022-07-06T08:20:59.242Z" }, "jupyter": { "source_hidden": true }, "orderProperties": [], "tags": [], "title": "Podname", "credentialsJson": {} }, "outputs": [], "source": [ "podName = podName.strip()\n", "print(f'Pod {podName} attached to PVC {PVCName}')" ] }, { "cell_type": "markdown", "id": "e8f65779-7310-41d6-bfd8-8d59f8fdcba6", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 4", "orderProperties": [], "tags": [], "title": "Step 4" }, "source": [ "This action restarts the pod. If RestartPodsAfterResize is enabled, it restarts the pods attached to the PVC.
\n", "NOTE: This is not required if the kubernetes has ExpandInUsePersistentVolumes enabled.
\n", "\n", "\n", "This action takes the following parameters:
\n", "namespace, podName
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "66c3f5ee-f13e-4a62-bf5e-87b1eea0e262", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "checkEnabled": false, "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "id": 55, "index": 55, "inputData": [ { "kubectl_command": { "constant": false, "value": "f\"kubectl delete pod {podName} -n {Namespace}\"" } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Restart the pod", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "printOutput": true, "startcondition": "RestartPodsAfterResize==True", "tags": [ "k8s_kubectl_command" ], "title": "Restart the pod", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def k8s_kubectl_command(handle, kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given 
kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"f\\\\\"kubectl delete pod {podName} -n {Namespace}\\\\\"\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"RestartPodsAfterResize==True\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "64daaeb5-b33a-43d5-8052-2a103cd0cd04", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 5", "orderProperties": [], "tags": [], "title": "Step 5" }, "source": [ "This action captures the following ouput:
\n", "None
This action verifies resize by running 'df-kh' on the pod attached to the PVC.
\n", "\n", "\n", "This action takes the following parameters:
\n", "namespace, podName
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "79c4d6b1-aa30-4865-b3da-1ee6b0a65105", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "8383fbb16190afe9c1936fcceab4f438e45e24f288491416037be1ed07e50c57", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute a command on a Kubernetes POD in a given Namespace", "id": 43, "index": 43, "inputData": [ { "command": { "constant": false, "value": "[\"df\", \"-kh\"]" }, "namespace": { "constant": false, "value": "Namespace" }, "podname": { "constant": false, "value": "podName" } } ], "inputschema": [ { "properties": { "command": { "description": "Commands to execute on the Pod. 
Eg \"df -k\"", "title": "Command", "type": "string" }, "namespace": { "description": "Kubernetes namespace.", "title": "Namespace", "type": "string" }, "podname": { "description": "Kubernetes Pod Name", "title": "Pod", "type": "string" } }, "required": [ "namespace", "podname", "command" ], "title": "k8s_exec_command_on_pod", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Execute a command on a Kubernetes POD in a given Namespace", "nouns": [], "orderProperties": [ "namespace", "podname", "command" ], "output": { "type": "" }, "printOutput": true, "tags": [ "k8s_exec_command_on_pod" ], "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2021 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from posixpath import split\n", "from typing import List\n", "import pprint\n", "from kubernetes import client\n", "from kubernetes.stream import stream\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_exec_command_on_pod_printer(output):\n", " if output is None:\n", " return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "@beartype\n", "def k8s_exec_command_on_pod(handle, namespace: str, podname: str, command: str) -> str:\n", " \"\"\"k8s_exec_command_on_pod executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type namespace: str\n", " :param namespace: Kubernetes namespace.\n", "\n", " :type podname: str\n", " :param podname: Kubernetes Pod Name.\n", "\n", " :type command: str\n", " :param command: Commands to execute on the Pod.\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " coreApiClient = client.CoreV1Api(api_client=handle)\n", "\n", " try:\n", " resp = 
stream(coreApiClient.connect_get_namespaced_pod_exec,\n", " podname,\n", " namespace,\n", " command=command.split(),\n", " stderr=True,\n", " stdin=True,\n", " stdout=True,\n", " tty=False\n", " )\n", " except Exception as e:\n", " resp = f'An Exception occured while executing the command {e}'\n", "\n", " return resp\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"command\": \"[\\\\\"df\\\\\", \\\\\"-kh\\\\\"]\",\n", " \"namespace\": \"Namespace\",\n", " \"podname\": \"podName\"\n", " }''')\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_exec_command_on_pod, lego_printer=k8s_exec_command_on_pod_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "d5990ec2-daff-4289-acd3-e2bafc92c46a", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 5A", "orderProperties": [], "tags": [], "title": "Step 5A" }, "source": [ "This action captures the following ouput:
\n", "None
This action further verifies the resize by running commands on the local k8s cluster and gets the new size.
\n", "\n", "\n", "This action takes the following parameters:
\n", "namespace, PVCName
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "1d79ae1f-ae84-4416-a19a-8bcd5c33d2e0", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "4d3b4c6153e14622f42b332b7c7b8f7043577971f64edc5be6b5f8b40d5b89d1", "checkEnabled": false, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Execute the given Kubectl command.", "id": 55, "index": 55, "inputData": [ { "kubectl_command": { "constant": false, "value": "f\"kubectl get pvc {PVCName} -n {Namespace} -o jsonpath={{.status.capacity.storage}}\"" } } ], "inputschema": [ { "properties": { "kubectl_command": { "description": "kubectl command eg \"kubectl get pods --all-namespaces\"", "title": "Kubectl Command", "type": "string" } }, "required": [ "kubectl_command" ], "title": "k8s_kubectl_command", "type": "object" } ], "isUnskript": false, "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_K8S", "name": "Run kubectl commands on local k8s cluster", "nouns": [], "orderProperties": [ "kubectl_command" ], "output": { "type": "" }, "outputParams": { "output_name": "newSize", "output_name_enabled": true }, "printOutput": true, "tags": [ "k8s_kubectl_command" ], "title": "Run kubectl commands on local k8s cluster", "verbs": [], "credentialsJson": {} }, "outputs": [], "source": [ "#\n", "# Copyright (c) 2022 unSkript.com\n", "# All rights reserved.\n", "#\n", "\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def k8s_kubectl_command_printer(output):\n", " if output is None:\n", " return\n", " print(output)\n", "\n", "\n", "@beartype\n", "def 
k8s_kubectl_command(handle, kubectl_command: str) -> str:\n", " \"\"\"k8s_kubectl_command executes the given kubectl command on the pod\n", "\n", " :type handle: object\n", " :param handle: Object returned from the Task validate method\n", "\n", " :type kubectl_command: str\n", " :param kubectl_command: The Actual kubectl command, like kubectl get ns, etc..\n", "\n", " :rtype: String, Output of the command in python string format or Empty String in case of Error.\n", " \"\"\"\n", " if handle.client_side_validation != True:\n", " print(f\"K8S Connector is invalid: {handle}\")\n", " return str()\n", "\n", " result = handle.run_native_cmd(kubectl_command)\n", " if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n", " print(\n", " f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n", " return str()\n", "\n", " return result.stdout\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(inputParamsJson='''{\n", " \"kubectl_command\": \"f\\\\\"kubectl get pvc {PVCName} -n {Namespace} -o jsonpath={{.status.capacity.storage}}\\\\\"\"\n", " }''')\n", "task.configure(outputName=\"newSize\")\n", "\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "67ed3763-1fc2-4fd4-9cdd-b140821e4fe0", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step 6", "orderProperties": [], "tags": [], "title": "Step 6" }, "source": [ "This action captures the following ouput:
\n", "newSize
This action posts a slack message notifying the new size of the PVC.
\n", "\n", "\n", "This action takes the following parameters:
\n", "Channel
\n", "" ] }, { "cell_type": "code", "execution_count": null, "id": "fee9946d-d975-42c7-8651-ff7b55250fb9", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "6a87f83ab0ecfeecb9c98d084e2b1066c26fa64be5b4928d5573a5d60299802d", "condition_enabled": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "currentVersion": "0.1.0", "description": "Post Slack Message", "execution_data": { "last_date_success_run_cell": "2022-07-07T04:05:14.429Z" }, "id": 46, "index": 46, "inputData": [ { "channel": { "constant": false, "value": "Channel" }, "message": { "constant": false, "value": "f\"PVC {PVCName} successfully resized to {newSize}\"" } } ], "inputschema": [ { "properties": { "channel": { "description": "Name of the slack channel where the message to be posted", "title": "Channel", "type": "string" }, "message": { "description": "Message to be sent", "title": "Message", "type": "string" } }, "required": [ "channel", "message" ], "title": "slack_post_message", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SLACK", "name": "Post Slack Message", "nouns": [ "slack", "message" ], "orderProperties": [ "channel", "message" ], "output": { "type": "" }, "printOutput": true, "startcondition": "if len(Channel)!=0", "tags": [ "slack_post_message" ], "verbs": [ "post" ], "credentialsJson": {} }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "\n", "from pydantic import BaseModel, Field\n", "from slack_sdk import WebClient\n", "from slack_sdk.errors import SlackApiError\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "\n", "from beartype import beartype\n", "@beartype\n", "def slack_post_message_printer(data):\n", " if data != None:\n", " pprint.pprint(data)\n", "\n", "@beartype\n", "def 
slack_post_message(\n", " handle: WebClient,\n", " channel: str,\n", " message: str) -> str:\n", "\n", " try:\n", " response = handle.chat_postMessage(\n", " channel=channel,\n", " text=message)\n", " return f\"Successfuly Sent Message on Channel: #{channel}\"\n", " except SlackApiError as e:\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.response['error']}\")\n", " if e.response['error'] == 'channel_not_found':\n", " raise Exception('Channel Not Found')\n", " elif e.response['error'] == 'duplicate_channel_not_found':\n", " raise Exception('Channel associated with the message_id not valid')\n", " elif e.response['error'] == 'not_in_channel':\n", " raise Exception('Cannot post message to channel user is not in')\n", " elif e.reponse['error'] == 'is_archived':\n", " raise Exception('Channel has been archived')\n", " elif e.response['error'] == 'msg_too_long':\n", " raise Exception('Message text is too long')\n", " elif e.response['error'] == 'no_text':\n", " raise Exception('Message text was not provided')\n", " elif e.response['error'] == 'restricted_action':\n", " raise Exception('Workspace preference prevents user from posting')\n", " elif e.response['error'] == 'restricted_action_read_only_channel':\n", " raise Exception('Cannot Post message, read-only channel')\n", " elif e.respones['error'] == 'team_access_not_granted':\n", " raise Exception('The token used is not granted access to the workspace')\n", " elif e.response['error'] == 'not_authed':\n", " raise Exception('No Authtnecition token provided')\n", " elif e.response['error'] == 'invalid_auth':\n", " raise Exception('Some aspect of Authentication cannot be validated. 
Request denied')\n", " elif e.response['error'] == 'access_denied':\n", " raise Exception('Access to a resource specified in the request denied')\n", " elif e.response['error'] == 'account_inactive':\n", " raise Exception('Authentication token is for a deleted user')\n", " elif e.response['error'] == 'token_revoked':\n", " raise Exception('Authentication token for a deleted user has been revoked')\n", " elif e.response['error'] == 'no_permission':\n", " raise Exception('The workspace toekn used does not have necessary permission to send message')\n", " elif e.response['error'] == 'ratelimited':\n", " raise Exception('The request has been ratelimited. Retry sending message later')\n", " elif e.response['error'] == 'service_unavailable':\n", " raise Exception('The service is temporarily unavailable')\n", " elif e.response['error'] == 'fatal_error':\n", " raise Exception('The server encountered catostrophic error while sending message')\n", " elif e.response['error'] == 'internal_error':\n", " raise Exception('The server could not complete operation, likely due to transietn issue')\n", " elif e.response['error'] == 'request_timeout':\n", " raise Exception('Sending message error via POST: either message was missing or truncated')\n", " else:\n", " raise Exception(f'Failed Sending Message to slack channel {channel} Error: {e.response[\"error\"]}')\n", "\n", " return f\"Unable to send message on {channel}\"\n", " except Exception as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.__str__()}\")\n", " return f\"Unable to send message on {channel}\"\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"channel\": \"Channel\",\n", " \"message\": \"f\\\\\"PVC {PVCName} successfully resized to {newSize}\\\\\"\"\n", " }''')\n", "task.configure(conditionsJson='''{\n", " \"condition_enabled\": true,\n", " \"condition_cfg\": \"if 
len(Channel)!=0\",\n", " \"condition_result\": true\n", " }''')\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(slack_post_message, lego_printer=slack_post_message_printer, hdl=hdl, args=args)" ] }, { "cell_type": "markdown", "id": "3f624369-89fd-4354-b13e-3de80d4465d4", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "This action captures the following ouput:
\n", "None
In this Runbook, we were able successfully resize a PVC using unSkript's K8s actions. To view the full platform capabilities of unSkript please visit us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "k8s: Resize PVC", "parameters": [ "RestartPodsAfterResize", "Value", "Channel", "Namespace", "PVCName", "ResizeOption" ] }, "kernelspec": { "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "Channel": { "description": "Slack channel", "title": "Channel", "type": "string" }, "Namespace": { "description": "Namespace of the PVC", "title": "Namespace", "type": "string" }, "PVCName": { "description": "Name of the PVC", "title": "PVCName", "type": "string" }, "ResizeOption": { "default": "Add", "description": "Option to resize the volume. 2 options supported: 1. Add - Use this option to resize by an amount. 2. Multiple - Use this option if you want to resize by a multiple of the current volume size.", "enum": [ "Add" ], "enumNames": [ "Add" ], "title": "ResizeOption", "type": "string" }, "RestartPodsAfterResize": { "default": false, "description": "Restart the pods after PVC resize", "title": "RestartPodsAfterResize", "type": "boolean" }, "Value": { "description": "Based on the resize option chosen, specify the value (float). For eg, if you chose Add option, this value will be a value in Gb (like 100.0). If you chose, this value will be a multiplying factor to the current volume size. 
For eg, to double, specify value as 2.0", "title": "Value", "type": "number" } }, "required": [ "Namespace", "PVCName", "Value" ], "title": "Schema", "type": "object" }, "parameterValues": {}, "vscode": { "interpreter": { "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Kubernetes/Resize_PVC.json ================================================ { "name": "k8s: Resize PVC", "description": "This runbook resizes a Kubernetes PVC.", "uuid": "b8385df9545bdb5695af879d7d089571fed148d996cf4b7e9e7848502e2cc029", "icon": "CONNECTOR_TYPE_K8S", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_K8S" ], "version": "1.0.0" } ================================================ FILE: Kubernetes/Rollback_k8s_Deployment_and_Update_Jira.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "b176b2f8-b2a7-48e5-a573-1d2058900ba1", "metadata": { "jupyter": { "source_hidden": false }, "name": "Runbook Overview", "orderProperties": [], "tags": [], "title": "Runbook Overview" }, "source": [ "In this action, we collect all the namespaces available in the cluster as a list. This action only executes if the namespace parameter is not given.
\n", "kubectl_commandnamespace_listIn this action, we modify the output which collects from the Gathering Information cell and returns a list of all the namespaces or given namespaces.
\n", "namespace_dataHere we will use the unSkript Get Deployment Rollout Status action. This action is used to identify the status of deployment for the namespace and return a list of a dictionary that contains the deployments which failed.
\n", "namespace, deploymentdeployment_dataIn this action, we modify the output which collects from step 1 and return a list of dictionaries for the failed deployments.
\n", "rollout_deploymentHere we will use the unSkript Kubectl rollout deployment action. This action is used to roll back the deployment to a stable version.
\n", "k8s_cli_string, deployment, namespacerollback_statusHere we will use unSkript Change JIRA Issue Status action. This action is used to update the status of the Jira issue. It will only execute if the issue id is given.
\n", "issue_id, status, transitionupdate_statusIn this Runbook, we demonstrated the use of unSkript's AWS and Jira actions to roll back the Kubernetes deployment to the previous stable version and update the issue status in jira. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Rollback Kubernetes Deployment", "parameters": [ "issue_id", "namespace" ] }, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "parameterSchema": { "properties": { "issue_id": { "description": "Jira Issue ID. e.g. EN-123", "title": "issue_id", "type": "string" }, "namespace": { "description": "Namespace", "title": "namespace", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": null, "vscode": { "interpreter": { "hash": "e8899eb02dfbc033aab5733bdae1bd213fa031d40331094008e8673d99ebab63" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Kubernetes/Rollback_k8s_Deployment_and_Update_Jira.json ================================================ { "name": "Rollback Kubernetes Deployment", "description": "This runbook can be used to rollback Kubernetes Deployment", "uuid": "65afc892db3d7ef487fe2353282bf94351e4674a34f56cd0349a2ad920897ddd", "icon": "CONNECTOR_TYPE_K8S", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_TROUBLESHOOTING" ], "connector_types": [ "CONNECTOR_TYPE_K8S" ], "version": "1.0.0" } ================================================ FILE: Kubernetes/__init__.py ================================================ # # unSkript (c) 2022 ================================================ FILE: Kubernetes/legos/README.md ================================================ [
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_add_node_to_cluster/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_add_node_to_cluster/k8s_add_node_to_cluster.json
================================================
{
"action_title": "Add Node in a Kubernetes Cluster",
"action_description": "Add Node in a Kubernetes Cluster",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_add_node_to_cluster",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_CLUSTER","CATEGORY_TYPE_K8S_NODE"]
}
================================================
FILE: Kubernetes/legos/k8s_add_node_to_cluster/k8s_add_node_to_cluster.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
import pprint
from typing import List, Tuple
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
pp = pprint.PrettyPrinter(indent=2)
class InputSchema(BaseModel):
    # Input contract for the add-node action; Field metadata drives the unSkript UI.
    # Name for the new Node object.
    node_name: str = Field(
        description='k8s node name',
        title='Node')
    # Cluster the node is being registered with.
    cluster_name: str = Field(
        description='k8s cluster Name',
        title='Cluster')
    # Cloud-provider identifier copied into the node spec.
    provider_id: str = Field(
        description='k8s node spec provider ID. Eg aws:///us-west-2a/{instance_type}',
        title='Node Spec Provider')
    # System details copied into V1NodeSystemInfo; only the listed keys are read.
    node_info: dict = Field(
        description='Node system info like architecture, boot_id, etc. '
                    'Allowed key names are: '
                    'architecture, '
                    'boot_id, '
                    'container_runtime_version, '
                    'kernel_version, '
                    'kube_proxy_version, '
                    'kubelet_version, '
                    'machine_id, '
                    'operating_system, '
                    'os_image, '
                    'system_uuid.',
        title='Node Info')
    # Resource capacity advertised for the node.
    capacity: dict = Field(
        description='Node Parameters, like cpu, storage, memory. '
                    'For eg: attachable-volumes-aws-ebs=25 in gb, '
                    'cpu=1 core, memory=7935036Ki, '
                    'ephemeral-storage:104845292Ki, hugepages-1Gi:0, '
                    'hugepages-2Mi:0, pods:29',
        title='Node Capacity')
def k8s_add_node_to_cluster_printer(output):
    """Pretty-print the (node, response) tuple from k8s_add_node_to_cluster.

    Returns the API response part of the tuple, or None when there was no output.
    """
    if output is None:
        return None
    v1node, data = output
    # Report what was submitted, then what the API returned.
    pp.pprint(f"Creating Node {v1node}" if v1node is not None else "Error Creating Node")
    pp.pprint(f"Node Created {data}" if data is not None else "Node Creation Error")
    return data
def k8s_add_node_to_cluster(handle,
                            node_name: str,
                            cluster_name: str,
                            provider_id: str,
                            node_info: dict,
                            capacity: dict) -> Tuple:
    """k8s_add_node_to_cluster registers a new Node object with the cluster.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type node_name: str
    :param node_name: k8s node name
    :type cluster_name: str
    :param cluster_name: k8s cluster Name
    :type provider_id: str
    :param provider_id: k8s node spec provider ID. Eg aws:///us-west-2a/{instance_type}
    :type node_info: dict
    :param node_info: Node system info like architecture, boot_id, etc.
    :type capacity: dict
    :param capacity: Node Parameters, like cpu, storage, memory.

    :rtype: Tuple of (submitted V1Node, API response) on success, or
        (None, None) if the API call raised an ApiException.
    """
    coreApiClient = client.CoreV1Api(handle)
    try:
        v1Node = client.V1Node()
        metadata = client.V1ObjectMeta()
        metadata.name = node_name
        # NOTE(review): V1ObjectMeta.cluster_name is deprecated/removed in newer
        # kubernetes client releases — confirm the pinned client still accepts it.
        metadata.cluster_name = cluster_name
        v1Node.metadata = metadata
        v1NodeSpec = client.V1NodeSpec()
        v1NodeSpec.provider_id = provider_id
        v1Node.spec = v1NodeSpec
        v1NodeStatus = client.V1NodeStatus()
        if capacity:
            v1NodeStatus.capacity = capacity
        if node_info:
            # Only the documented keys are consumed; anything else is ignored.
            v1NodeSystemInfo = client.V1NodeSystemInfo(
                architecture=node_info.get("architecture", None),
                boot_id=node_info.get("boot_id", None),
                container_runtime_version=node_info.get("container_runtime_version", None),
                kernel_version=node_info.get("kernel_version", None),
                kube_proxy_version=node_info.get("kube_proxy_version", None),
                kubelet_version=node_info.get("kubelet_version", None),
                machine_id=node_info.get("machine_id", None),
                operating_system=node_info.get("operating_system", None),
                os_image=node_info.get("os_image", None),
                system_uuid=node_info.get("system_uuid", None)
            )
            # Assignment kept inside the guard: v1NodeSystemInfo only exists
            # when node_info was provided.
            v1NodeStatus.node_info = v1NodeSystemInfo
        v1Node.status = v1NodeStatus
        resp = coreApiClient.create_node(body=v1Node, pretty=True)
        return (v1Node, resp)
    except ApiException as e:
        error = f'An Exception occurred while executing the command :{e}'
        pp.pprint(error)
        return (None, None)
================================================
FILE: Kubernetes/legos/k8s_change_pvc_size/README.md
================================================
# Change PVC Size
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_check_cronjob_pod_status/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_check_cronjob_pod_status/k8s_check_cronjob_pod_status.json
================================================
{
"action_title": "Check the status of K8s CronJob pods",
"action_description": "This action checks the status of CronJob pods",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_check_cronjob_pod_status",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Kubernetes/legos/k8s_check_cronjob_pod_status/k8s_check_cronjob_pod_status.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from datetime import datetime, timezone
from kubernetes import client
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from croniter import croniter
from datetime import datetime, timezone, timedelta
import json
class InputSchema(BaseModel):
    # Empty/None namespace means every namespace in the cluster is scanned.
    namespace: Optional[str] = Field(..., title='Namespace', description='k8s Namespace')
    # Look-back window, in hours, for flagging Pending cronjob pods.
    time_interval_to_check: int = Field(
        24,
        title="Time Interval",
        description='Time interval in hours. This time window is used to check if pod in a cronjob was in Pending state. Default is 24 hours.',
    )
def k8s_check_cronjob_pod_status_printer(output):
    """Render the (status, issues) tuple returned by the cronjob check."""
    status, issues = output
    if status:
        print("CronJobs are running as expected.")
        return
    # One line per problematic CronJob.
    for issue in issues:
        print(f"CronJob '{issue['cronjob_name']}' in namespace '{issue['namespace']}' has issues.")
def format_datetime(dt):
    """Render a datetime as 'YYYY-MM-DD HH:MM:SS TZ' (TZ empty for naive values)."""
    return "{:%Y-%m-%d %H:%M:%S %Z}".format(dt)
def k8s_check_cronjob_pod_status(handle, namespace: str='', time_interval_to_check: int=24) -> Tuple:
    """
    Checks the status of the CronJob pods.

    :type handle: object
    :param handle: The Kubernetes client handle.
    :type namespace: str
    :param namespace: Namespace where the CronJob is deployed. An empty value
        means every namespace in the cluster is scanned.
    :type time_interval_to_check: int
    :param time_interval_to_check: Look-back window in hours used to decide
        whether a Pending pod counts as an issue.
    :return: A tuple where the first item has the status if the check and second has a list of failed objects.
    """
    # Initialize the K8s API clients
    batch_v1 = client.BatchV1Api(api_client=handle)
    core_v1 = client.CoreV1Api(api_client=handle)
    issues = []
    current_time = datetime.now(timezone.utc)
    interval_time_to_check = current_time - timedelta(hours=time_interval_to_check)
    interval_time_to_check = interval_time_to_check.replace(tzinfo=timezone.utc)
    # Get namespaces to check
    if namespace:
        namespaces = [namespace]
    else:
        ns_obj = core_v1.list_namespace()
        namespaces = [ns.metadata.name for ns in ns_obj.items]
    for ns in namespaces:
        # Fetch all CronJobs in the namespace using kubectl
        get_cronjob_command = f"kubectl get cronjobs -n {ns} -o=jsonpath='{{.items[*].metadata.name}}'"
        response = handle.run_native_cmd(get_cronjob_command)
        if not response or response.stderr:
            raise Exception(f"Error fetching CronJobs for namespace {ns}: {response.stderr if response else 'empty response'}")
        cronjob_names = response.stdout.split()
        for cronjob_name in cronjob_names:
            get_cronjob_details_command = f"kubectl get cronjob {cronjob_name} -n {ns} -o=json"
            try:
                response = handle.run_native_cmd(get_cronjob_details_command)
                if response.stderr:
                    raise Exception(f"Error fetching details for CronJob {cronjob_name} in namespace {ns}: {response.stderr}")
            except Exception as e:
                # A single unreadable CronJob should not abort the whole check.
                print(f"Failed to fetch details for CronJob {cronjob_name} in namespace {ns}: {str(e)}")
                continue
            cronjob = json.loads(response.stdout)
            # Fetch the most recent Job associated with the CronJob
            jobs = batch_v1.list_namespaced_job(ns)  # Fetch all jobs, and then filter by prefix.
            associated_jobs = [job for job in jobs.items if job.metadata.name.startswith(cronjob['metadata']['name'])]
            if not associated_jobs:
                # If no associated jobs, that means the job is not scheduled.
                continue
            latest_job = sorted(associated_jobs, key=lambda x: x.status.start_time, reverse=True)[0]
            # Check job's pods for any issues; stop at the first problem pod per CronJob.
            pods = core_v1.list_namespaced_pod(ns, label_selector=f"job-name={latest_job.metadata.name}")
            for pod in pods.items:
                if pod.status.phase == 'Pending':
                    start_time = pod.status.start_time
                    # Pending is only a problem if it started within the window.
                    if start_time and start_time >= interval_time_to_check:
                        issues.append({
                            "cronjob_name": cronjob_name,
                            "namespace": ns,
                            "pod_name": pod.metadata.name,
                            "start_time": format_datetime(start_time)
                        })
                        break
                elif pod.status.phase not in ['Running', 'Succeeded','Completed']:
                    issues.append({
                        "cronjob_name": cronjob_name,
                        "namespace": ns,
                        "pod_name": pod.metadata.name,
                        "state": pod.status.phase
                    })
                    break
    return (not issues, issues if issues else None)
================================================
FILE: Kubernetes/legos/k8s_check_service_pvc_utilization/README.md
================================================
# Check K8s service PVC utilization
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_check_service_pvc_utilization/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_check_service_pvc_utilization/k8s_check_service_pvc_utilization.json
================================================
{
"action_title": "Check K8s service PVC utilization ",
"action_description": "This check fetches the PVC associated with a given service, determines its utilized size, and then compares it to its total capacity. If the used percentage exceeds the provided threshold, it triggers an alert.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_check_service_pvc_utilization",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Kubernetes/legos/k8s_check_service_pvc_utilization/k8s_check_service_pvc_utilization.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
import re
import json
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # Namespace that hosts the services under inspection.
    namespace: str = Field(
        ...,
        title="Namespace",
        description="The namespace in which the service resides.",
    )
    # Services whose PVC utilization should be checked.
    core_services: list = Field(
        ...,
        title="K8s Service name",
        description="List of services for which the used PVC size needs to be checked.",
    )
    # Alert when utilization exceeds this percentage of the PVC capacity.
    threshold: Optional[int] = Field(
        80,
        title="Threshold (in %)",
        description="Percentage threshold for utilized PVC disk size.E.g., a 80% threshold checks if the utilized space exceeds 80% of the total PVC capacity.",
    )
def k8s_check_service_pvc_utilization(
    handle, core_services: list, namespace: str, threshold: int = 60
) -> Tuple:
    """
    k8s_check_service_pvc_utilization checks the utilized disk size of a service's PVC against a given threshold.

    This function fetches the PVC associated with a given service, determines its utilized size,
    and then compares it to its total capacity. If the used percentage exceeds the provided threshold,
    it triggers an alert.

    :type handle: object
    :param handle: Handle object to execute the kubectl command.
    :type core_services: list
    :param core_services: List of service names whose PVC utilization is checked.
    :type namespace: str
    :param namespace: The namespace in which the service resides.
    :type threshold: int
    :param threshold: Percentage threshold for utilized PVC disk size.
        E.g., a 80% threshold checks if the utilized space exceeds 80% of the total PVC capacity.
        NOTE(review): the default here is 60 while InputSchema declares 80 — confirm which is intended.
    :return: Status and dictionary with PVC name and its size information if the PVC's disk size exceeds threshold.
    """
    alert_pvcs_all_services = []
    services_without_pvcs = []
    # Keep track of processed PVCs to avoid duplicates
    processed_pvcs = set()
    for svc in core_services:
        # Get label associated with the service
        get_service_labels_command = f"kubectl get services {svc} -n {namespace} -o=jsonpath='{{.spec.selector}}'"
        response = handle.run_native_cmd(get_service_labels_command)
        if not response.stdout.strip():
            # No labels found for a particular service. Skipping...
            continue
        # jsonpath prints the selector dict with single quotes; normalize to JSON.
        labels_dict = json.loads(response.stdout.replace("'", '"'))
        label_selector = ",".join([f"{k}={v}" for k, v in labels_dict.items()])
        # Fetch the pod attached to this service.
        # The safer option is to try with the * option. Having a specific index like 0 or 1
        # will lead to ApiException.
        get_pod_command = f"kubectl get pods -n {namespace} -l {label_selector} -o=jsonpath='{{.items[*].metadata.name}}'"
        response = handle.run_native_cmd(get_pod_command)
        if not response or response.stderr:
            raise ApiException(
                f"Error while executing command ({get_pod_command}): {response.stderr if response else 'empty response'}"
            )
        # pod_names stores the output from the above kubectl command, which is a list of pod_names separated by space
        pod_names = response.stdout.strip()
        if not pod_names:
            # No pods found for service {svc} in namespace {namespace} with labels {label_selector}
            continue
        # Fetch PVCs attached to the pod
        # The Above kubectl command would return a string that is space separated name(s) of the pod.
        # Given such a string, lets find out if we have one or more than one pod name in the string.
        # If there are more than one pod name in the output, we need to iterate over all items[] array.
        # Else we can directly access the persistentVolumeClaim name
        # Lets also associate the pod_name along with the claim name (PVC Name) in the format of
        # pod_name:pv_claim_name
        if len(pod_names.split()) > 1:
            json_path_cmd = '{range .items[*]}{.metadata.name}:{range .spec.volumes[*].persistentVolumeClaim}{.claimName} {end}{"\\n"}{end}'
        else:
            json_path_cmd = "{.metadata.name}:{range .spec.volumes[*].persistentVolumeClaim}{.claimName}{end}"
        get_pvc_names_command = f"kubectl get pod {pod_names} -n {namespace} -o=jsonpath='{json_path_cmd}'"
        response = handle.run_native_cmd(get_pvc_names_command)
        if not response or response.stderr:
            raise ApiException(
                f"Error while executing command ({get_pvc_names_command}): {response.stderr if response else 'empty response'}"
            )
        # Example: ['lightbeam-elasticsearch-master-0:data-lightbeam-elasticsearch-master-0']
        pod_and_pvc_names = response.stdout.strip().split()
        # The pod_and_pvc_names
        if not pod_and_pvc_names:
            services_without_pvcs.append(svc)
            continue
        pvc_mounts = []
        alert_pvcs = []
        for element in pod_and_pvc_names:
            pod_name, claim_name = element.split(":")
            if not claim_name:
                # Skip if Volume Claim name is empty.
                continue
            # Fetch the Pod JSON
            # We need to get the container name (if any) from the Pod's JSON. This is needed
            # if we want to exec into the POD that is within a container. The JSON data that
            # we obtain is used to fill the pvc_mounts list, which is a list of dictionaries.
            # We use this pvc_mounts to find out the used_space percentage. We compare that with
            # the threshold to flag if the utilization is above threshold.
            # df -kh is the command used to get the disk utilization. This is accurate as we get
            # the disk utilization from the POD directly, rather than checking the resource limit
            # and resource request from the deployment / stateful YAML file.
            get_pod_json_command = (
                f"kubectl get pod {pod_name} -n {namespace} -o json"
            )
            pod_json_output = handle.run_native_cmd(get_pod_json_command)
            if not pod_json_output or pod_json_output.stderr:
                raise ApiException(
                    f"Error fetching pod json for {pod_name}: {pod_json_output.stderr if pod_json_output else 'empty response'}"
                )
            pod_data = json.loads(pod_json_output.stdout)
            # Dictionary .get() method with default value is way of error handling
            for container in pod_data.get("spec", {}).get("containers", {}):
                for mount in container.get("volumeMounts", {}):
                    for volume in pod_data.get("spec", {}).get("volumes", {}):
                        if "persistentVolumeClaim" in volume and volume.get(
                            "name"
                        ) == mount.get("name"):
                            try:
                                claim_name = volume["persistentVolumeClaim"][
                                    "claimName"
                                ]
                                print(f"ClaimName: {claim_name}: MountName: {mount['name']} ContainerName: {container['name']}")
                                # Add mount info if not already added
                                mount_info = {
                                    "container_name": container["name"],
                                    "mount_path": mount["mountPath"],
                                    "pvc_name": claim_name if claim_name else None,
                                    "pod_name": pod_name
                                }
                                # Only add if this specific mount combination hasn't been processed yet
                                mount_key = f"{pod_name}:{container['name']}:{mount['mountPath']}:{claim_name}"
                                if mount_key not in processed_pvcs:
                                    pvc_mounts.append(mount_info)
                                    processed_pvcs.add(mount_key)
                            except KeyError as e:
                                # Handle the KeyError (e.g., log the error, skip this iteration, etc.)
                                print(f"KeyError: {e}. Skipping this entry.")
                            except IndexError as e:
                                # Handle the IndexError (e.g., log the error, skip this iteration, etc.)
                                print(f"IndexError: {e}. Skipping this entry.")
        # Create a dictionary to store processed PVC info
        pvc_info_dict = {}
        # Process each mount separately with a single df command
        for mount in pvc_mounts:
            container_name = mount["container_name"]
            mount_path = mount["mount_path"]
            pvc_name = mount["pvc_name"]
            pod_name = mount["pod_name"]
            # Skip if we've already processed this PVC
            if pvc_name in pvc_info_dict:
                continue
            # Exec into the pod to read actual disk utilization at the mount point.
            du_command = f"kubectl exec -n {namespace} {pod_name} -c {container_name} -- df -kh {mount_path} | grep -v Filesystem"
            du_output = handle.run_native_cmd(du_command)
            if du_output and not du_output.stderr:
                # Process each line of df output separately
                df_lines = du_output.stdout.strip().split("\n")
                for df_line in df_lines:
                    if not df_line.strip():
                        continue
                    # Split line into columns
                    columns = re.split(r"\s+", df_line.strip())
                    # Find the percentage column (contains '%')
                    percent_col = None
                    for i, col in enumerate(columns):
                        if "%" in col:
                            percent_col = i
                            break
                    if percent_col is None or len(columns) < 2:
                        print(f"Warning: Unexpected df output format: {df_line}")
                        continue
                    # Extract percentage and capacity
                    used_percentage = int(columns[percent_col].replace("%", ""))
                    total_capacity = columns[1] if len(columns) > 1 else "Unknown"
                    pvc_info = {
                        "pvc_name": pvc_name,
                        "mount_path": mount_path,
                        "used": used_percentage,
                        "capacity": total_capacity,
                    }
                    # Store in dictionary to prevent duplicates
                    pvc_info_dict[pvc_name] = pvc_info
                    # Check if usage exceeds threshold
                    if used_percentage > threshold:
                        alert_pvcs.append(pvc_info)
        # Add unique alert PVCs to the main list
        for pvc_info in alert_pvcs:
            if pvc_info not in alert_pvcs_all_services:
                alert_pvcs_all_services.append(pvc_info)
    if services_without_pvcs:
        print("Following services do not have any PVCs attached:")
        for service in services_without_pvcs:
            print(f"- {service}")
    if alert_pvcs_all_services:
        print(json.dumps(alert_pvcs_all_services, indent=4))
    return (not bool(alert_pvcs_all_services), alert_pvcs_all_services)
================================================
FILE: Kubernetes/legos/k8s_check_service_status/README.md
================================================
# Check K8s services endpoint and SSL certificate health
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_check_service_status/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_check_service_status/k8s_check_service_status.json
================================================
{
"action_title": "Check K8s services endpoint and SSL certificate health",
"action_description": "Checks the health status of the provided list of endpoints and their SSL certificate status.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_check_service_status",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Kubernetes/legos/k8s_check_service_status/k8s_check_service_status.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Tuple, List, Optional
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import requests
from pydantic import BaseModel, Field
from datetime import datetime, timedelta
import ssl
import socket
# Disabling insecure request warnings
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class InputSchema(BaseModel):
    # Endpoints to probe for HTTP health and certificate expiry.
    endpoints: list = Field(
        ..., description='The URLs of the endpoint whose SSL certificate is to be checked. Eg: ["https://www.google.com", "https://expired.badssl.com/"]', title='List of URLs'
    )
    # Fixed: the title previously read 'K8s Namespace', which belongs to a
    # different field; this is the expiry-warning window in days.
    threshold: Optional[int] = Field(
        30,
        description='The number of days within which, if the certificate is set to expire is considered a potential issue.',
        title='Threshold (in days)',
    )
def k8s_check_service_status_printer(output):
    """Print a human-readable summary of the (status, results) tuple."""
    status, results = output
    if status:
        print("All services are healthy.")
        return
    first = results[0]
    # A single {"Error": ...} entry means the check itself failed.
    if "Error" in first:
        print(f"Error: {first['Error']}")
        return
    separator = "=" * 100
    print("\n" + separator)
    for entry in results:
        print(f"Service:\t{entry['endpoint']}")
        print("-" * 100)
        print(f"Status: {entry['status']}\n")
        print(separator)
def check_ssl_expiry(endpoint, threshold):
    """Return (days_remaining, is_healthy) for the endpoint's TLS certificate.

    :param endpoint: URL; the hostname is extracted from it and port 443 is used.
    :param threshold: certificates expiring within this many days are unhealthy.
    :raises: socket/ssl errors (connection failure, handshake or verification
        failure, timeout) propagate to the caller unchanged. The previous
        ``except Exception as e: raise e`` wrapper was a no-op and was removed.
    """
    hostname = endpoint.split("//")[-1].split("/")[0]
    ssl_date_fmt = r'%b %d %H:%M:%S %Y %Z'
    # Create an SSL context that restricts to secure versions of TLS
    context = ssl.create_default_context()
    context.check_hostname = True
    context.verify_mode = ssl.CERT_REQUIRED
    # Ensure that only TLSv1.2 and later are used (disabling TLSv1.0 and TLSv1.1) as TLS versions 1.0 and 1.1 are known to be vulnerable to attacks
    context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
    with socket.create_connection((hostname, 443), timeout=10) as sock:
        with context.wrap_socket(sock, server_hostname=hostname) as ssl_sock:
            ssl_info = ssl_sock.getpeercert()
    expiry_date = datetime.strptime(ssl_info['notAfter'], ssl_date_fmt).date()
    days_remaining = (expiry_date - datetime.utcnow().date()).days
    # Healthy only when expiry is strictly beyond the threshold window.
    return (days_remaining, days_remaining > threshold)
def k8s_check_service_status(handle, endpoints: list, threshold: int = 30) -> Tuple:
    """
    k8s_check_service_status Checks the health status of the provided endpoints.

    :param handle: unused by this action; kept for the standard action signature.
    :param endpoints: The URLs of the endpoint whose SSL certificate is to be checked. Eg: ["https://www.google.com", "https://expired.badssl.com/"]
    :param threshold: The number of days within which, if the certificate is set to expire,
        is considered a potential issue.
    :return: Tuple with a boolean indicating if all services are healthy, and a list of dictionaries
        with individual service status.
    """
    failed_endpoints = []
    for endpoint in endpoints:
        status_info = {"endpoint": endpoint}
        # Check if the endpoint is HTTPS or not
        if endpoint.startswith("https://"):
            try:
                response = requests.get(endpoint, verify=True, timeout=5)
                days_remaining, is_healthy = check_ssl_expiry(endpoint, threshold)
                if not (response.status_code == 200 and is_healthy):
                    status_info["status"] = 'unhealthy'
                    reason = f'SSL expiring in {days_remaining} days.' if not is_healthy else f'Status code: {response.status_code}'
                    status_info["Reason"] = reason
                    failed_endpoints.append(status_info)
            except (requests.RequestException, ssl.SSLError, OSError) as e:
                # check_ssl_expiry propagates raw socket/ssl failures; catch
                # those too so one bad endpoint cannot abort the whole check.
                status_info["status"] = 'unhealthy'
                reason = f'SSL error: {str(e)}' if 'CERTIFICATE_VERIFY_FAILED' in str(e) else f'Error: {str(e)}'
                status_info["Reason"] = reason
                failed_endpoints.append(status_info)
        else:
            # For non-HTTPS endpoints
            try:
                response = requests.get(endpoint, timeout=5)
                if response.status_code != 200:
                    status_info["status"] = 'unhealthy'
                    status_info["Reason"] = f'Status code: {response.status_code}'
                    failed_endpoints.append(status_info)
            except requests.RequestException as e:
                status_info["status"] = 'unhealthy'
                status_info["Reason"] = f'Error: {str(e)}'
                failed_endpoints.append(status_info)
    if failed_endpoints:
        return (False, failed_endpoints)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_check_worker_cpu_utilization/README.md
================================================
# Check K8s worker CPU Utilization
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_check_worker_cpu_utilization/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_check_worker_cpu_utilization/k8s_check_worker_cpu_utilization.json
================================================
{
"action_title": "Check K8s worker CPU Utilization",
"action_description": "Retrieves the CPU utilization for all worker nodes in the cluster and compares it to a given threshold.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_check_worker_cpu_utilization",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_NODE"]
}
================================================
FILE: Kubernetes/legos/k8s_check_worker_cpu_utilization/k8s_check_worker_cpu_utilization.py
================================================
from __future__ import annotations
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from pydantic import BaseModel, Field
from typing import Optional, Tuple
class InputSchema(BaseModel):
    # CPU utilization alert threshold, expressed as a percentage.
    threshold: Optional[float] = Field(
        70.0,
        title='Threshold (in %)',
        description='Threshold for CPU utilization in percentage.',
    )
def k8s_check_worker_cpu_utilization_printer(output):
    """Print the (status, nodes_info) result of the worker CPU check."""
    status, nodes_info = output
    if status:
        print("All nodes are within the CPU utilization threshold.")
        return
    divider = "-" * 40
    print("ALERT: Nodes exceeding CPU utilization threshold:")
    print(divider)
    for entry in nodes_info:
        print(f"Node: {entry['node']} - CPU Utilization: {entry['cpu']}%")
    print(divider)
def k8s_check_worker_cpu_utilization(handle, threshold: float=70.0) -> Tuple:
    """
    k8s_check_worker_cpu_utilization Retrieves the CPU utilization for all worker nodes in the cluster and compares it to a given threshold.

    :type handle: object
    :param handle: Handle object to execute the kubectl command.
    :type threshold: float
    :param threshold: Threshold for CPU utilization in percentage.
    :return: Status and list of {"node", "cpu"} dicts for nodes whose CPU
        utilization exceeds the threshold (None when all nodes are within it).
    :raises Exception: when the kubectl command fails or returns no response.
    """
    exceeding_nodes = []
    kubectl_command = "kubectl top nodes --no-headers"
    response = handle.run_native_cmd(kubectl_command)
    if response is None or response.stderr:
        raise Exception(f"Error while executing command ({kubectl_command}): {response.stderr if response else 'empty response'}")
    # Ensure response.stdout is processed only once and correctly
    lines = response.stdout.strip().split('\n')
    seen_nodes = set()  # Keep track of nodes that have already been processed
    for line in lines:
        parts = line.split()
        # Expected columns: NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
        if len(parts) < 5:  # Check for correct line format
            continue
        node_name, cpu_percentage_str = parts[0], parts[2].rstrip('%')
        if node_name in seen_nodes:
            print(f"Duplicate entry detected for node {node_name}, skipping.")
            continue
        seen_nodes.add(node_name)
        try:
            cpu_percentage = float(cpu_percentage_str)
        except ValueError:
            # e.g. "<unknown>" when metrics-server has no data for the node yet;
            # previously this crashed the whole check with a ValueError.
            print(f"Warning: could not parse CPU percentage '{parts[2]}' for node {node_name}, skipping.")
            continue
        if cpu_percentage > threshold:
            exceeding_nodes.append({"node": node_name, "cpu": cpu_percentage})
    if exceeding_nodes:
        return (False, exceeding_nodes)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_delete_pod/README.md
================================================
# Delete Kubernetes Pod
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_delete_pvc/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_delete_pvc/k8s_delete_pvc.json
================================================
{
"action_title": "Delete Kubernetes PVC",
"action_description": "This action force deletes a list of Kubernetes PVCs in a given Namespace.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_delete_pvc",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Kubernetes/legos/k8s_delete_pvc/k8s_delete_pvc.py
================================================
from __future__ import annotations
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from typing import Dict
from kubernetes import client, config
from kubernetes.client.exceptions import ApiException
import pprint
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Namespace containing the PVCs to delete.
    namespace: str = Field(..., title='K8s namespace', description='Kubernetes namespace')
    # Names of the PVCs to force-delete.
    pvc_names: list = Field(..., title='List of PVC names', description='List of K8S PVC Names. Eg: ["data-dir-1", "data-dir-2"]')
def k8s_delete_pvc_printer(output):
    """Pretty-print the per-PVC deletion results; silently ignores None."""
    if output is not None:
        pprint.pprint(output)
def k8s_delete_pvc(handle, namespace: str, pvc_names: list) -> Dict:
    """
    k8s_delete_pvc force deletes one or more Kubernetes PVCs in a given Namespace.

    :type handle: object
    :param handle: Object returned from the Task validate method or Kubernetes client configuration
    :type namespace: str
    :param namespace: Kubernetes namespace
    :type pvc_names: list
    :param pvc_names: List of K8S PVC Names. Eg: ["data-dir-1", "data-dir-2"]
    :rtype: Dict mapping each PVC name to its deletion status.
    :raises ApiException: re-raised on the first failed deletion; PVCs later in
        the list are then not attempted and the partial result dict is lost to
        the caller. NOTE(review): confirm whether continuing past failures and
        returning the collected statuses is the intended behavior.
    """
    coreApiClient = client.CoreV1Api(api_client=handle)
    responses = {}
    for pvc_name in pvc_names:
        try:
            # propagation_policy='Foreground' makes dependents get removed
            # before the PVC object itself is deleted.
            resp = coreApiClient.delete_namespaced_persistent_volume_claim(
                name=pvc_name,
                namespace=namespace,
                body=client.V1DeleteOptions(propagation_policy='Foreground')  # This forces the deletion
            )
            responses[pvc_name] = resp.status
        except ApiException as e:
            resp = 'An Exception occurred while executing the command ' + e.reason
            responses[pvc_name] = resp
            raise e
    return responses
================================================
FILE: Kubernetes/legos/k8s_describe_node/README.md
================================================
# Describe Kubernetes Node
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_describe_node/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_describe_node/k8s_describe_node.json
================================================
{
"action_title": "Describe Kubernetes Node",
"action_description": "Describe a Kubernetes Node",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_describe_node",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_NODE"]
}
================================================
FILE: Kubernetes/legos/k8s_describe_node/k8s_describe_node.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # Name of the node to describe.
    node_name: str = Field(
        description='Kubernetes Node name',
        title='Node',
    )
def k8s_describe_node_printer(output):
    """Pretty-print the node description; no-op on None."""
    if output is None:
        return
    pprint.pprint(output)

# Backward-compatible alias: the original function name carried a typo
# ("desribe"), which breaks the <entry_function>_printer lookup convention
# used by the other legos. Keep the old name so any existing reference works.
k8s_desribe_node_printer = k8s_describe_node_printer
def k8s_describe_node(handle, node_name: str):
    """k8s_describe_node gets the details of a Kubernetes node.

    :type handle: object
    :param handle: Object returned from the Task validate method
        (a Kubernetes API client handle).

    :type node_name: str
    :param node_name: Kubernetes Node name.

    :rtype: V1Node object with the node details on success, or an error
        message string when the API call fails.
    """
    coreApiClient = client.CoreV1Api(handle)
    try:
        resp = coreApiClient.read_node(node_name, pretty=True)
    except ApiException as e:
        # Return a printable error instead of raising, matching the style of
        # the other legos in this package (fixed typo and missing separator).
        resp = 'An Exception occurred while executing the command ' + e.reason
    return resp
================================================
FILE: Kubernetes/legos/k8s_describe_pod/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_describe_pod/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_describe_pod/k8s_describe_pod.json
================================================
{
"action_title": "Describe a Kubernetes POD in a given Namespace",
"action_description": "Describe a Kubernetes POD in a given Namespace",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_describe_pod",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD"]
}
================================================
FILE: Kubernetes/legos/k8s_describe_pod/k8s_describe_pod.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
import collections
from typing import Dict
import pprint
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # Namespace in which the pod lives.
    namespace: str = Field(
        title='Namespace',
        description='Kubernetes namespace')
    # Name of the pod to describe.
    podname: str = Field(
        title='Pod',
        description='K8S Pod Name')
def k8s_desribe_pod_printer(output):
    """Pretty-print the pod description produced by k8s_describe_pod; no-op on None."""
    if output is not None:
        pprint.pprint(output)
def k8s_describe_pod(handle, namespace: str, podname: str) -> Dict:
    """k8s_describe_pod gets Kubernetes POD details.

    Reads the pod via the CoreV1 API, removes server bookkeeping and None
    values, and flattens selected fields into an ordered dict that mimics
    the layout of `kubectl describe pod`.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type namespace: str
    :param namespace: Kubernetes namespace.
    :type podname: str
    :param podname: K8S Pod Name.
    :rtype: Dict of POD details
    :raises ApiException: when the pod read fails.
    """
    coreApiClient = client.CoreV1Api(api_client=handle)

    def cleanNullTerms(_dict):
        """Delete None values recursively from all of the dictionaries"""
        for key, value in list(_dict.items()):
            if isinstance(value, dict):
                cleanNullTerms(value)
            elif value is None:
                del _dict[key]
            elif isinstance(value, list):
                for v_i in value:
                    if isinstance(v_i, dict):
                        cleanNullTerms(v_i)
        return _dict

    data = {}
    try:
        resp = coreApiClient.read_namespaced_pod(
            name=podname, namespace=namespace)
        resp = resp.to_dict()
        # managed_fields is server-side bookkeeping, not useful in a describe view.
        del resp['metadata']['managed_fields']
        resp = cleanNullTerms(resp)
        data["Name"] = resp['metadata']['name']
        data["Namespace"] = namespace
        data["Priority"] = resp['spec']['priority']
        data["Node"] = resp['spec']['node_name']
        data["Start Time"] = resp['status']['start_time']
        data["Labels"] = resp['metadata']['labels']
        if "annotations" in resp['metadata']:
            data["Annotations"] = resp['metadata']['annotations']
        data["Status"] = resp['status']['phase']
        data["IP"] = resp['status']['pod_ip']
        data["IPS"] = resp['status'].get('pod_i_ps')
        data["Controlled By"] = resp['metadata']['owner_references'][0]['kind'] + \
            "/" + resp['metadata']['owner_references'][0]['name']
        data["Containers"] = ''
        ####
        for container in resp['spec']['containers']:
            data[' ' + container['name']] = ''
            for c in container:
                data[' ' + c] = container[c]
        # Container Index Represents the Number of containers in a given POD
        container_index = 0
        msglist = []
        for c in resp['status']['container_statuses']:
            data[' ' + c['name']] = ''
            data[' ' + 'Container ID'] = c['container_id']
            data[' ' + 'Image'] = c['image']
            data[' ' + 'Image ID'] = c['image_id']
            data[' ' + 'Port'] = resp['spec']['containers'][container_index]['ports']
            if 'command' in resp['spec']['containers'][container_index]:
                data[' ' + 'Command'] = resp['spec']['containers'][container_index]['command']
            if 'args' in resp['spec']['containers'][container_index]:
                data[' ' + 'Args'] = resp['spec']['containers'][container_index]['args']
            data[' ' + 'State'] = ''
            # cleanNullTerms deletes keys whose value was None, so use .get():
            # direct indexing would raise KeyError for a removed state key.
            if c['state'].get('running') is None and c['state'].get('waiting') is not None:
                data[' ' + 'Reason'] = c['state']['waiting']['reason']
            if c['last_state'].get('terminated') is not None:
                msglist.append(c['last_state']['terminated']['message'])
            container_index += 1
        data['Conditions'] = ''
        for c in resp["status"]["conditions"]:
            data["Type"] = "Status"
            # Condition status is the string "True"/"False"/"Unknown" in the
            # Kubernetes API; bool() of any non-empty string is always True,
            # so compare against "True" explicitly.
            data[c["type"]] = c["status"] == "True"
        data['Volumes:'] = ''
        for container in resp["spec"]["volumes"]:
            for c in container:
                data[' ' + c] = container[c]
        data['QoS Class:'] = resp['status'].get('qos_class')
        tolerations = []
        for toleration in resp['spec']['tolerations']:
            tolerations.append(toleration["key"] + ":" + toleration["effect"] + " op=" + \
                toleration["operator"] + " for " + str(toleration["toleration_seconds"]))
        data['Tolerations'] = tolerations
        data['Events'] = msglist
    except ApiException as e:
        # The old code built an error string into a local that the re-raise
        # immediately discarded; just propagate the exception.
        raise e
    data = collections.OrderedDict(data)
    return data
================================================
FILE: Kubernetes/legos/k8s_detect_service_crashes/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_detect_service_crashes/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_detect_service_crashes/k8s_detect_service_crashes.json
================================================
{
"action_title": "Detect K8s service crashes",
"action_description": "Detects service crashes by checking the logs of each pod for specific error messages.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_detect_service_crashes",
"action_is_check": true,
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_TROUBLESHOOTING","CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD" ],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_detect_service_crashes/k8s_detect_service_crashes.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
import json
import re
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    # Namespace whose services and pods are scanned.
    namespace: str = Field(
        description='K8S Namespace',
        title='K8S Namespace'
    )
    # How many trailing log lines to scan per container.
    tail_lines: Optional[int] = Field(
        100,
        description='Number of log lines to fetch from each container. Defaults to 100.',
        title='No. of lines (Default: 100)'
    )
    # Service names whose pod logs are checked for crash patterns.
    core_services: list = Field(
        description='List of services to detect service crashes on.'
    )
def k8s_detect_service_crashes_printer(output):
    """Render the crash-detection result: a success note, or a grid of crashes."""
    status, data = output
    if status:
        print("No detected errors in the logs of the pods.")
        return
    headers = ["Pod", "Namespace", "Error", "Timestamp"]
    rows = [(entry["pod"], entry["namespace"], entry["error"], entry["timestamp"])
            for entry in data]
    print(tabulate(rows, headers=headers, tablefmt="grid"))
def k8s_detect_service_crashes(handle, namespace: str, core_services:list, tail_lines: int = 100) -> Tuple:
    """
    k8s_detect_service_crashes detects service crashes by checking the logs of
    each service's pods for known error patterns.

    :type handle: object
    :param handle: Object returned from the task.validate(...)
    :type namespace: str
    :param namespace: K8S Namespace to search for the services and pods.
    :type core_services: list
    :param core_services: List of service names to check for crashes.
    :type tail_lines: int
    :param tail_lines: Number of log lines to fetch from each container. Defaults to 100.
    :rtype: Tuple - (True, None) when nothing suspicious was found, otherwise
        (False, list of {service, pod, namespace, error, timestamp} dicts)
    """
    # Add more error patterns here as necessary (was previously defined twice).
    ERROR_PATTERNS = ["Worker exiting", "Exception"]
    crash_logs = []

    # Retrieve all services and pods in the namespace just once.
    kubectl_cmd = f"kubectl -n {namespace} get services,pods -o json"
    try:
        response = handle.run_native_cmd(kubectl_cmd)
        services_and_pods = json.loads(response.stdout.strip())["items"]
    except json.JSONDecodeError as json_err:
        print(f"Error parsing JSON response: {str(json_err)}")
        return (True, None)  # Return early if we can't parse the JSON at all
    except Exception as e:
        print(f"Unexpected error while fetching services and pods: {str(e)}")
        return (True, None)

    for service_name_to_check in core_services:
        service_found = False
        for item in services_and_pods:
            if item.get("kind") == "Service" and item.get("metadata", {}).get("name") == service_name_to_check:
                service_found = True
                pod_labels = item.get('spec', {}).get("selector", None)
                if pod_labels:
                    pod_selector = ",".join([f"{key}={value}" for key, value in pod_labels.items()])
                    try:
                        kubectl_logs_cmd = f"kubectl -n {namespace} logs --selector {pod_selector} --tail={tail_lines}"
                        pod_logs = handle.run_native_cmd(kubectl_logs_cmd).stdout.strip()
                        for error_pattern in ERROR_PATTERNS:
                            if not re.search(error_pattern, pod_logs):
                                continue
                            # Best effort: take the last ISO-8601-looking timestamp
                            # in the log tail. Guard against logs with no timestamps
                            # at all — indexing [-1] on an empty list would raise.
                            timestamps = re.findall(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}", pod_logs)
                            crash_logs.append({
                                "service": service_name_to_check,
                                "pod": item.get('metadata', {}).get('name', 'N/A'),
                                "namespace": item.get('metadata', {}).get('namespace', 'N/A'),
                                "error": error_pattern,
                                "timestamp": timestamps[-1] if timestamps else "Unknown Time"
                            })
                    except Exception as e:
                        # Log the error but don't stop execution for the remaining services.
                        print(f"Error fetching logs for service {service_name_to_check}: {str(e)}")
        if not service_found:
            print(f"Service {service_name_to_check} not found in namespace {namespace}. Continuing with next service.")

    return (False, crash_logs) if crash_logs else (True, None)
================================================
FILE: Kubernetes/legos/k8s_exec_command_on_pod/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_exec_command_on_pod/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_exec_command_on_pod/k8s_exec_command_on_pod.json
================================================
{
"action_title": "Execute a command on a Kubernetes POD in a given Namespace",
"action_description": "Execute a command on a Kubernetes POD in a given Namespace",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_exec_command_on_pod",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD"]
}
================================================
FILE: Kubernetes/legos/k8s_exec_command_on_pod/k8s_exec_command_on_pod.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
import pprint
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.stream import stream
class InputSchema(BaseModel):
    # Namespace in which the target pod lives.
    namespace: str = Field(
        title='Namespace',
        description='Kubernetes namespace.')
    # Pod on which the command is executed.
    podname: str = Field(
        title='Pod',
        description='Kubernetes Pod Name')
    # Command line; it is split on whitespace before being sent to the pod.
    command: str = Field(
        title='Command',
        description='Commands to execute on the Pod. Eg "df -k"')
def k8s_exec_command_on_pod_printer(output):
    """Pretty-print the command output from k8s_exec_command_on_pod; no-op on None."""
    if output is not None:
        pprint.pprint(output)
def k8s_exec_command_on_pod(handle, namespace: str, podname: str, command: str) -> str:
    """k8s_exec_command_on_pod executes the given command on the pod.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type namespace: str
    :param namespace: Kubernetes namespace.
    :type podname: str
    :param podname: Kubernetes Pod Name.
    :type command: str
    :param command: Command to execute on the Pod, e.g. "df -k". The string
        is split on whitespace before being passed to the exec API.
    :rtype: String, output of the command, or an error message string when
        the exec call raises an exception.
    """
    coreApiClient = client.CoreV1Api(api_client=handle)
    try:
        resp = stream(coreApiClient.connect_get_namespaced_pod_exec,
                      podname,
                      namespace,
                      command=command.split(),
                      stderr=True,
                      stdin=True,
                      stdout=True,
                      tty=False
                      )
    except Exception as e:
        # Fixed typo in the error message ("occured" -> "occurred").
        resp = f'An Exception occurred while executing the command {e}'
    return resp
================================================
FILE: Kubernetes/legos/k8s_exec_command_on_pods_and_filter/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_execute_helm_command/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_execute_helm_command/k8s_execute_helm_command.json
================================================
{
"action_title": "Helm command",
"action_description": "Execute helm command in K8S Cluster",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_execute_helm_command",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL"]
}
================================================
FILE: Kubernetes/legos/k8s_execute_helm_command/k8s_execute_helm_command.py
================================================
#
# Copyright (c) 2024 unSkript.com
# All rights reserved.
#
import subprocess
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Full helm command line, e.g. "helm list -n default".
    helm_command: str = Field(
        title='Helm Command',
        description='Helm command to execute in the K8s Cluster'
    )
def k8s_execute_helm_command_printer(data: str):
    """Print the helm command output when it is non-empty."""
    if data:
        print(data)
def k8s_execute_helm_command(handle, helm_command: str) -> str:
    """k8s_execute_helm_command executes the given helm command in the k8s cluster.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type helm_command: str
    :param helm_command: Helm Command that need to be executed
    :rtype: String, stdout of the given helm command. Empty string when the
        input is invalid; None when the command itself failed.
    """
    retval = None
    if handle.client_side_validation is not True:
        print(f"K8S Connector is invalid: {handle}")
        return str()

    if not helm_command:
        # Plain string: there are no placeholders to format.
        print("Given helm command is empty, cannot proceed further!")
        return str()

    config_file = None
    try:
        config_file = handle.temp_config_file
    except Exception as e:
        print(f"ERROR: {str(e)}")
        return str()

    if config_file:
        if not '--kubeconfig' in helm_command:
            # Rewrite only the FIRST occurrence of "helm": a blind replace
            # would also corrupt words containing "helm" elsewhere in the
            # command line (e.g. a release named "myhelm").
            helm_command = helm_command.replace('helm',
                                                f'helm --kubeconfig {config_file}', 1)
    else:
        # Incluster configuration, so need not have any kubeconfig
        pass

    try:
        result = subprocess.run(helm_command,
                                check=True,
                                shell=True,
                                capture_output=True,
                                text=True)
        retval = result.stdout
        # NOTE: with check=True a non-zero exit raises CalledProcessError,
        # so the old "stderr and returncode != 0" branch here was unreachable.
    except subprocess.CalledProcessError as e:
        # With text=True, e.stderr is already a str — calling .decode() on it
        # raised AttributeError and masked the real failure.
        error_message = f"Error running command: {e}\n{e.stderr}" \
            if e.stderr else f"Error running command: {e}"
        print(error_message)
    return retval
================================================
FILE: Kubernetes/legos/k8s_execute_local_script_on_a_pod/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_execute_local_script_on_a_pod/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_execute_local_script_on_a_pod/k8s_execute_local_script_on_a_pod.json
================================================
{
"action_title": "Execute local script on a pod",
"action_description": "Execute local script on a pod in a namespace",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_execute_local_script_on_a_pod",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_POD"]
}
================================================
FILE: Kubernetes/legos/k8s_execute_local_script_on_a_pod/k8s_execute_local_script_on_a_pod.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
import pprint
from pydantic import BaseModel, Field
import os
#import subprocess
class InputSchema(BaseModel):
    # Pod on which the script is executed.
    pod_name: str = Field(
        title="Pod Name",
        description="K8S Pod Name"
    )
    # Namespace in which the pod exists.
    namespace: str = Field(
        title="Namespace",
        description="K8S Namespace where the POD exists"
    )
    # Local full path of the script that is copied to and run on the pod.
    file_name: str = Field(
        title="Script filename with the full path",
        description="Script filename with the full path. "
    )
def k8s_execute_local_script_on_a_pod_printer(output):
    """Pretty-print the script output; no-op on None."""
    if output is not None:
        pprint.pprint(output)
def k8s_execute_local_script_on_a_pod(handle, namespace: str, pod_name:str, file_name:str)->str:
    """k8s_execute_local_script_on_a_pod copies a local script onto a pod,
    executes it there, and returns the script's output.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type namespace: str
    :param namespace: Namespace to get the pods from. Eg:"logging"
    :type pod_name: str
    :param pod_name: Pod name to to run the script.
    :type file_name: str
    :param file_name: Script file name (local full path).
    :rtype: String of the result of the script that was run on the pod
    :raises Exception: when the remote execution wrote to stderr.
    """
    # Copy the script to the pod using kubectl cp.
    tmp_script_path = "/tmp/script.sh"
    handle.run_native_cmd(f'kubectl cp {file_name} {namespace}/{pod_name}:{tmp_script_path}')
    # Make the script executable on the pod.
    handle.run_native_cmd(f'kubectl exec -n {namespace} {pod_name} -- chmod +x {tmp_script_path}')
    # Execute the script on the pod and capture the output.
    command = f'kubectl exec -n {namespace} {pod_name} -- sh -c {tmp_script_path}'
    result = handle.run_native_cmd(command)
    # Remove the temporary script file.
    handle.run_native_cmd(f'kubectl exec -n {namespace} {pod_name} -- rm -f {tmp_script_path}')
    if result.stderr not in ('', None):
        # Raising a plain string is a TypeError in Python 3 — wrap it in an
        # Exception so callers actually see the remote stderr.
        raise Exception(result.stderr)
    return result.stdout
================================================
FILE: Kubernetes/legos/k8s_gather_data_for_pod_troubleshoot/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_gather_data_for_pod_troubleshoot/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_gather_data_for_pod_troubleshoot/k8s_gather_data_for_pod_troubleshoot.json
================================================
{
"action_title": "Gather Data for POD Troubleshoot",
"action_description": "Gather Data for POD Troubleshoot",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_gather_data_for_pod_troubleshoot",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_POD"]
}
================================================
FILE: Kubernetes/legos/k8s_gather_data_for_pod_troubleshoot/k8s_gather_data_for_pod_troubleshoot.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # Pod whose describe output and logs are gathered.
    pod_name: str = Field(
        title="Pod Name",
        description="K8S Pod Name"
    )
    # Namespace in which the pod exists.
    namespace: str = Field(
        title="Namespace",
        description="K8S Namespace where the POD exists"
    )
def k8s_gather_data_for_pod_troubleshoot_printer(output):
    """Pretty-print the gathered troubleshooting data; skip empty results."""
    if output:
        pprint.pprint(output)
def k8s_gather_data_for_pod_troubleshoot(handle, pod_name: str, namespace: str) -> dict:
    """k8s_gather_data_for_pod_troubleshoot This function gathers data from the k8s namespace
    to assist in troubleshooting of a pod. The gathered data are returned in the form of a
    dictionary with `describe` and `logs` keys (`error` replaces `logs` when
    the log fetch fails).

    :type handle: Object
    :param handle: Object returned from task.validate(...) routine
    :type pod_name: str
    :param pod_name: Name of the K8S POD (Mandatory parameter)
    :type namespace: str
    :param namespace: Namespace where the above K8S POD is found (Mandatory parameter)
    :rtype: Output of in the form of dictionary with `describe` and `logs` keys
    """
    if not pod_name or not namespace:
        raise TypeError("POD Name and Namespace are mandatory parameters, cannot be None")

    retval = {}
    # Get Describe POD details
    kubectl_client = f'kubectl describe pod {pod_name} -n {namespace}'
    result = handle.run_native_cmd(kubectl_client)
    if result is None:
        print(
            f"Error while executing command ({kubectl_client}) (empty response)")
        return {}
    if result.stderr:
        raise ApiException(
            f"Error occurred while executing command {kubectl_client} {result.stderr}")
    # Bug fix: the describe output was fetched but never stored, so the
    # promised `describe` key was always missing from the result.
    retval['describe'] = result.stdout

    # Get Logs for the POD
    kubectl_client = f'kubectl logs {pod_name} -n {namespace}'
    result = handle.run_native_cmd(kubectl_client)
    if not result.stderr:
        retval['logs'] = result.stdout
    else:
        retval['error'] = result.stderr
    return retval
================================================
FILE: Kubernetes/legos/k8s_gather_data_for_service_troubleshoot/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_gather_data_for_service_troubleshoot/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_gather_data_for_service_troubleshoot/k8s_gather_data_for_service_troubleshoot.json
================================================
{
"action_title": "Gather Data for K8S Service Troubleshoot",
"action_description": "Gather Data for K8S Service Troubleshoot",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_gather_data_for_service_troubleshoot",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ]
}
================================================
FILE: Kubernetes/legos/k8s_gather_data_for_service_troubleshoot/k8s_gather_data_for_service_troubleshoot.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
import pprint
import json
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # Service whose data is gathered.
    # NOTE(review): the entry function's parameter is named `servicename`
    # while this field is `service_name` — confirm the platform maps them.
    service_name: str = Field(
        title="Service Name",
        description="K8S Service Name to gather data"
    )
    # Namespace in which the service is looked up.
    namespace: str = Field(
        title='Namespace',
        description='k8s Namespace')
def k8s_gather_data_for_service_troubleshoot_printer(output):
    """Pretty-print the gathered service troubleshooting data; skip empty results."""
    if output:
        pprint.pprint(output)
def k8s_gather_data_for_service_troubleshoot(handle, servicename: str, namespace: str) -> dict:
    """k8s_gather_data_for_service_troubleshoot This utility function can be used to gather data
    for a given service in a namespace.

    :type handle: object
    :param handle: Object returned from task.validate(...) function
    :type servicename: str
    :param servicename: Service Name that needs gathering data
    :type namespace: str
    :param namespace: K8S Namespace
    :rtype: Dictionary with a `describe` key and, when matching ingress rules
        exist, an `ingress` key listing name/namespace/host/port/path entries.
    """
    if not namespace or not servicename :
        raise Exception("Namespace and Servicename are mandatory parameter")

    # Get Service Detail
    describe_cmd = f'kubectl describe svc {servicename} -n {namespace}'
    describe_output = handle.run_native_cmd(describe_cmd)
    if describe_output is None:
        print(
            f"Error while executing command ({describe_cmd}) (empty response)")
        return {}
    if describe_output.stderr:
        raise ApiException(
            f"Error occurred while executing command {describe_cmd} {describe_output.stderr}")
    # The raise above already guarantees stderr is empty here, so the old
    # second "if not describe_output.stderr" guard was redundant.
    retval = {'describe': describe_output.stdout}

    # To Get the Ingress rule, we first find out the name of the ingress
    # Find out the ingress rules in the given namespace, find out the
    # Matching rule in the ingress that matches the service name and append it
    # to the `ingress` key.
    rule_name = ''
    ingress_rules_for_service = []
    ingress_rule_name_cmd = f"kubectl get ingress -n {namespace} -o name"
    ingress_rule_name_output = handle.run_native_cmd(ingress_rule_name_cmd)
    if not ingress_rule_name_output.stderr:
        # Strip the trailing newline kubectl appends to `-o name` output.
        # NOTE(review): if several ingresses exist this value holds all of
        # their names joined by newlines — confirm single-ingress assumption.
        rule_name = ingress_rule_name_output.stdout.strip()

    ingress_rules_cmd = f"kubectl get ingress -n {namespace}" + \
        ' -o jsonpath="{.items[*].spec.rules}"'
    ingress_rules_output = handle.run_native_cmd(ingress_rules_cmd)
    if not ingress_rules_output.stderr:
        rules = json.loads(ingress_rules_output.stdout)
        for r in rules:
            h = r.get('host')
            for s_p in r.get('http').get('paths'):
                if s_p.get('backend').get('service').get('name') == servicename:
                    ingress_rules_for_service.append([
                        h,
                        s_p.get('backend').get('service').get('port'),
                        s_p.get('path')
                    ])

    if ingress_rules_for_service:
        retval['ingress'] = []
        for ir in ingress_rules_for_service:
            if len(ir) >= 3:
                retval['ingress'].append({'name': rule_name,
                                          'namespace': namespace,
                                          'host': ir[0],
                                          'port': ir[1],
                                          'path': ir[-1]})
    return retval
================================================
FILE: Kubernetes/legos/k8s_get_all_evicted_pods_from_namespace/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_all_evicted_pods_from_namespace/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_all_evicted_pods_from_namespace/k8s_get_all_evicted_pods_from_namespace.json
================================================
{
"action_title": "Get All Evicted PODS From Namespace",
"action_description": "This action gets all evicted PODs from the given namespace. If the namespace is not given, it checks pods across all namespaces.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_all_evicted_pods_from_namespace",
"action_is_check": true,
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" , "CATEGORY_TYPE_K8S", "CATEGORY_TYPE_K8S_POD"],
"action_next_hop": ["a9b8a0c8ecdb5ef76f01e81689319f16095d6136620a4c7f78d57e81ba9a3ba0"],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_all_evicted_pods_from_namespace/k8s_get_all_evicted_pods_from_namespace.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
import pprint
import json
from typing import Optional, Tuple
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Namespace to search; empty string means all namespaces.
    namespace: Optional[str] = Field(
        default='',
        title='Namespace',
        description='k8s Namespace')
def k8s_get_all_evicted_pods_from_namespace_printer(output):
    """Pretty-print the evicted-pods result tuple; no-op on None."""
    if output is not None:
        pprint.pprint(output)
def k8s_get_all_evicted_pods_from_namespace(handle, namespace: str = "") -> Tuple:
    """k8s_get_all_evicted_pods_from_namespace returns all evicted pods.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type namespace: str
    :param namespace: k8s namespace. When empty, all namespaces are searched.
    :rtype: Tuple - (False, list of evicted pod dicts) when any are found,
        otherwise (True, None)
    """
    if handle.client_side_validation is not True:
        raise Exception(f"K8S Connector is invalid: {handle}")

    # Define the kubectl command based on the namespace input
    kubectl_command = "kubectl get pods --all-namespaces -o json"
    if namespace:
        kubectl_command = "kubectl get pods -n " + namespace + " -o json"

    try:
        response = handle.run_native_cmd(kubectl_command)
    except Exception as e:
        print(f"Error occurred while executing command {kubectl_command}: {str(e)}")
        raise
    if response is None:
        print(f"Error while executing command ({kubectl_command}) (empty response)")
        raise Exception("Empty response from kubectl command")
    if response.stderr:
        raise Exception(f"Error occurred while executing command {kubectl_command} {response.stderr}")

    result = []
    # (The previous try/except that only re-raised added nothing and was removed.)
    pod_details = json.loads(response.stdout)
    for pod in pod_details.get('items', []):
        status = pod['status']
        # Evicted pods report phase "Failed" with status.reason "Evicted".
        # The old conditions-based check is kept as a fallback for backward
        # compatibility, but on its own it missed the common case.
        evicted = status.get('reason') == 'Evicted' or \
            any(cs.get('reason') == 'Evicted' for cs in status.get('conditions', []))
        if status['phase'] == 'Failed' and evicted:
            pod_dict = {
                "pod_name": pod["metadata"]["name"],
                "namespace": pod["metadata"]["namespace"]
            }
            result.append(pod_dict)

    if result:
        return (False, result)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_all_pods/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_all_resources_utilization_info/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_all_resources_utilization_info/k8s_get_all_resources_utilization_info.json
================================================
{
"action_title": "Get K8s pods status and resource utilization info",
"action_description": "This action gets the pod status and resource utilization of various Kubernetes resources like jobs, services, persistent volumes.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_all_resources_utilization_info",
"action_needs_credential": "true",
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": "false",
"action_supports_iteration": "true",
"action_supports_poll": "true",
"action_categories": ["CATEGORY_TYPE_INFORMATION" ]
}
================================================
FILE: Kubernetes/legos/k8s_get_all_resources_utilization_info/k8s_get_all_resources_utilization_info.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from typing import Optional, Dict
from tabulate import tabulate
import json
from kubernetes.client.rest import ApiException
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Namespace to inspect; empty string means all namespaces.
    namespace: Optional[str] = Field('', description='k8s Namespace', title='Namespace')
def k8s_get_all_resources_utilization_info_printer(data):
    """Render one table per resource kind from the utilization data dict."""
    # Kept for parity with the original contract: a missing 'namespace'
    # key is an error (KeyError), even though the value is unused here.
    namespace = data['namespace']
    pod_headers = ['Namespace', 'Name', 'Status', 'CPU Usage (m)', 'Memory Usage (Mi)']
    generic_headers = ['Name', 'Status']
    for kind, entries in data.items():
        if kind == 'namespace':  # metadata entry, not a resource listing
            continue
        print(f"\n{kind.capitalize()}:")
        if not entries:
            # Nothing to show for this resource kind; move on.
            continue
        headers = pod_headers if kind == 'pods' else generic_headers
        print(tabulate(entries, headers, tablefmt='pretty'))
def k8s_get_all_resources_utilization_info(handle, namespace: str = "") -> Dict:
    """
    Fetches pod status and resource utilization info for Kubernetes resources
    (currently pods and jobs).

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type namespace: string
    :param namespace: Namespace in which to look for the resources. If not provided, all namespaces are considered
    :rtype: Dict keyed by resource kind plus a 'namespace' entry; each value is
        a list of table rows. Returns (False, "Invalid Handle") when the
        connector handle fails client-side validation.
    """
    if handle.client_side_validation is not True:
        print(f"K8S Connector is invalid: {handle}")
        return False, "Invalid Handle"

    namespace_option = f"--namespace={namespace}" if namespace else "--all-namespaces"
    resources = ['pods', 'jobs'
                 # 'persistentvolumeclaims'
                 ]
    data = {resource: [] for resource in resources}
    data['namespace'] = namespace  # Store namespace in data dict

    # Fetch current utilization of pods
    pod_utilization_cmd = f"kubectl top pods {namespace_option} --no-headers"
    pod_utilization = handle.run_native_cmd(pod_utilization_cmd)
    if pod_utilization.stderr:
        # Surface the problem instead of swallowing it silently; the
        # utilization columns simply fall back to 'N/A' below.
        print(f"Warning: '{pod_utilization_cmd}' reported: {pod_utilization.stderr}")

    utilization_map = {}
    for line in pod_utilization.stdout.split('\n'):
        parts = line.split()
        # With --all-namespaces, `kubectl top pods` prepends a NAMESPACE column,
        # so the column layout differs between the two invocation modes.
        if namespace:
            if len(parts) < 3:  # Skip lines that don't have enough columns
                continue
            pod_name, cpu_usage, memory_usage = parts[:3]
            key = (namespace, pod_name)
        else:
            if len(parts) < 4:
                continue
            pod_namespace, pod_name, cpu_usage, memory_usage = parts[:4]
            key = (pod_namespace, pod_name)
        # (namespace, pod_name) tuples keep keys unique across namespaces.
        utilization_map[key] = (cpu_usage, memory_usage)

    for resource in resources:
        cmd = f"kubectl get {resource} -o json {namespace_option}"
        result = handle.run_native_cmd(cmd)
        if result.stderr:
            print(f"Error occurred while executing command {cmd}: {result.stderr}")
            continue
        items = json.loads(result.stdout)['items']
        if not items:
            continue
        for item in items:
            name = item['metadata']['name']
            ns = item['metadata'].get('namespace', 'default')
            if resource == 'pods':
                status = item['status']['phase']
                # Skip pods in terminal or pending states as they don't have any utilization
                if status in ['Succeeded', 'Completed', 'Failed', 'Pending']:
                    continue
                cpu_usage, memory_usage = utilization_map.get((ns, name), ('N/A', 'N/A'))
                data[resource].append([ns, name, status, cpu_usage, memory_usage])
            else:
                status = None
                if resource == 'jobs':
                    conditions = item['status'].get('conditions', [])
                    if conditions:
                        status = conditions[-1]['type']
                        if status in ['Complete']:
                            continue
                # elif resource == 'persistentvolumeclaims':
                #     status = item['status']['phase']
                if status is not None:
                    data[resource].append([ns, name, status])

    # Drop resource kinds that have no rows, but always keep the 'namespace'
    # entry because the printer function reads it.
    data = {k: v for k, v in data.items() if v or k == 'namespace'}
    return data
================================================
FILE: Kubernetes/legos/k8s_get_candidate_nodes_for_pods/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_candidate_nodes_for_pods/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_candidate_nodes_for_pods/k8s_get_candidate_nodes_for_pods.json
================================================
{
"action_title": "Get candidate k8s nodes for given configuration",
"action_description": "Get candidate k8s nodes for given configuration",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_candidate_nodes_for_pods",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD","CATEGORY_TYPE_K8S_NODE"]
}
================================================
FILE: Kubernetes/legos/k8s_get_candidate_nodes_for_pods/k8s_get_candidate_nodes_for_pods.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from tabulate import tabulate
from kubernetes import client
pp = pprint.PrettyPrinter(indent=2)
class InputSchema(BaseModel):
    # Minimum CPU capacity (in cores) a candidate node must exceed.
    cpu_limit: Optional[int] = Field(
        default=0,
        title='CPU Limit',
        description='CPU Limit. Eg 2')
    # Minimum memory a candidate node must exceed, given in Mi (e.g. "123Mi").
    memory_limit: Optional[str] = Field(
        default="",
        title='Memory Limit (Mi)',
        description='Limits and requests for memory are measured in bytes. '
        'Accept the store in Mi. Eg 123Mi')
    # Minimum pod capacity a candidate node must exceed.
    pod_limit: Optional[int] = Field(
        default=0,
        title='Number of Pods to attach',
        description='Pod Limit. Eg 2')
def k8s_get_candidate_nodes_for_pods_printer(output):
    """Render the matching nodes' capacity rows as a grid table."""
    if output is None:
        return
    table_rows = output[0]
    column_names = [
        "Name",
        "cpu",
        "ephemeral-storage",
        "hugepages-1Gi",
        "hugepages-2Mi",
        "memory",
        "pods",
    ]
    print("\n")
    print(tabulate(table_rows, tablefmt="grid", headers=column_names))
def k8s_get_candidate_nodes_for_pods(handle,
                                     cpu_limit: int = 0,
                                     memory_limit: str = "",
                                     pod_limit: int = 0) -> Tuple:
    """k8s_get_candidate_nodes_for_pods get nodes for pod

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type cpu_limit: int
    :param cpu_limit: CPU Limit.
    :type memory_limit: string
    :param memory_limit: Limits and requests for memory are measured in bytes.
        Given in Mi, e.g. "123Mi". Empty string means no memory requirement.
    :type pod_limit: int
    :param pod_limit: Pod Limit.
    :rtype: Tuple of (tabular capacity rows, matching node objects), or
        (None, None) when no node satisfies the limits
    """
    coreApiClient = client.CoreV1Api(api_client=handle)
    nodes = coreApiClient.list_node()
    # The default memory_limit of "" used to crash on int(''); treat an empty
    # value as "no minimum memory required".
    required_memory_mi = int(memory_limit.split("Mi")[0]) if memory_limit else 0
    match_nodes = []
    for node in nodes.items:
        capacity = node.status.capacity
        # Node memory capacity is reported in Ki; convert to Mi for comparison.
        node_memory_mi = int(capacity.get("memory", "0Ki").split("Ki")[0]) / 1024
        if (cpu_limit < int(capacity.get("cpu", 0)) and
                pod_limit < int(capacity.get("pods", 0)) and
                required_memory_mi < node_memory_mi):
            match_nodes.append(node)
    if len(match_nodes) > 0:
        data = []
        for node in match_nodes:
            node_capacity = [node.metadata.name]
            node_capacity.extend(node.status.capacity.values())
            data.append(node_capacity)
        return (data, match_nodes)
    pp.pprint("No Matching Nodes Found for this spec")
    return (None, None)
================================================
FILE: Kubernetes/legos/k8s_get_cluster_health/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_cluster_health/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_cluster_health/k8s_get_cluster_health.json
================================================
{
"action_title": "Get K8S Cluster Health",
"action_description": "Get K8S Cluster Health",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_cluster_health",
"action_needs_credential": true,
"action_supports_poll": true,
"action_is_check": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD","CATEGORY_TYPE_K8S_CLUSTER"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_cluster_health/k8s_get_cluster_health.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import json
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from kubernetes import client
class InputSchema(BaseModel):
    # Service names whose pods/deployments should be checked; empty checks all.
    core_services: Optional[list] = Field(
        default=[],
        title="Core Services",
        description="List of core services names to check for health. If empty, checks all services."
    )
    # Namespace to scope the check; empty checks every namespace.
    namespace: Optional[str] = Field(
        default="",
        title="Namespace",
        description="Namespace of the core services. If empty, checks all namespaces."
    )
def k8s_get_cluster_health_printer(output):
    """Print a human-readable summary of the cluster health check result."""
    status, health_issues = output
    if not status:
        print("Cluster Health: NOT OK\n")
        for issue in health_issues:
            print(f"Type: {issue['type']}")
            print(f"Name: {issue['name']}")
            print(f"Namespace: {issue.get('namespace', 'N/A')}")
            print(f"Issue: {issue['issue']}")
            print("-" * 40)
    else:
        print("Cluster Health: OK\n")
def execute_kubectl_command(handle, command: str):
    """Run a kubectl command through the connector handle and return its
    stripped stdout, or None when there is no usable output.

    :param handle: unSkript K8S connector handle
    :param command: Full kubectl command line to execute
    :return: Stripped stdout string, or None (failed invocation, resource not
        found, or empty output)
    """
    response = handle.run_native_cmd(command)
    # Guard against a failed invocation BEFORE touching stdout/stderr; the
    # original dereferenced response.stderr first and could crash on None.
    if response is None:
        print(f"No response for command: {command}")
        return None
    if response.stderr:
        print(f"Warning: {response.stderr}")
        if "not found" in response.stderr.lower():
            return None  # Resource not found in the given namespace; skip it
    if response.stdout:
        return response.stdout.strip()
    print(f"No output for command: {command}")
    return None
def get_namespaces(handle):
    """Return the names of every namespace in the cluster (empty list on failure)."""
    names = execute_kubectl_command(handle, "kubectl get ns -o=jsonpath='{.items[*].metadata.name}'")
    return names.split() if names else []
def get_label_selector_for_service(handle, namespace: str, service_name: str):
    """Build a 'k1=v1,k2=v2' label selector string from a Service's .spec.selector."""
    cmd = f"kubectl get svc {service_name} -n {namespace} -o=jsonpath='{{.spec.selector}}'"
    raw_selector = execute_kubectl_command(handle, cmd)
    if not raw_selector:
        return ''
    # kubectl prints the selector map with single quotes; normalize to JSON.
    selector_map = json.loads(raw_selector.replace("'", "\""))
    return ",".join(f"{key}={value}" for key, value in selector_map.items())
def check_node_health(node_api):
    """Flag every node whose 'Ready' condition is missing or not True."""
    issues = []
    for node in node_api.list_node().items:
        ready = None
        for condition in node.status.conditions:
            if condition.type == "Ready":
                ready = condition
                break
        if ready is None or ready.status != "True":
            cond_type = ready.type if ready else 'None'
            cond_status = ready.status if ready else 'None'
            issues.append({
                "type": "Node",
                "name": node.metadata.name,
                "issue": f"Node is not ready. Condition: {cond_type}, Status: {cond_status}"
            })
    return issues
def check_pod_health(handle, core_services, namespace):
    """Collect pod-level health issues.

    When core_services is given, each service's pods (located via the
    service's label selector) must be at least 70% Running; otherwise every
    non-Running pod in the namespace(s) is reported individually.

    :param handle: unSkript K8S connector handle
    :param core_services: List of service names to scope the check; empty
        checks all pods
    :param namespace: Namespace to check; empty checks all namespaces
    :return: List of issue dicts with 'type'/'name'/'namespace'/'issue' keys
    """
    health_issues = []
    namespaces = [namespace] if namespace else get_namespaces(handle)
    for ns in namespaces:
        if core_services:
            for service in core_services:
                label_selector = get_label_selector_for_service(handle, ns, service)
                if label_selector:
                    # Get all pods for the service
                    command_pods = f"kubectl get pods -n {ns} -l {label_selector} -o=json"
                    pods_info = execute_kubectl_command(handle, command_pods)
                    if pods_info:
                        pods_data = json.loads(pods_info)
                        total_pods = len(pods_data['items'])
                        running_pods = sum(1 for item in pods_data['items'] if item['status']['phase'] == "Running")
                        # Check if at least 70% of pods are running
                        if total_pods > 0:
                            running_percentage = (running_pods / total_pods) * 100
                            if running_percentage < 70:
                                health_issues.append({
                                    "type": "Pod",
                                    "name": service,
                                    "namespace": ns,
                                    "issue": f"Insufficient running pods. Only {running_pods} out of {total_pods} are running."
                                })
                    else:
                        print(f"No pods found for service {service} in namespace {ns}.")
                else:
                    print(f"No label selector found for service {service} in namespace {ns}. Skipping...")
        else:
            # Check all pods in the namespace if no specific services are given
            command = f"kubectl get pods -n {ns} -o=jsonpath='{{.items[?(@.status.phase!=\"Running\")].metadata.name}}'"
            pods_not_running = execute_kubectl_command(handle, command)
            if pods_not_running:
                for pod_name in pods_not_running.split():
                    health_issues.append({"type": "Pod", "name": pod_name, "namespace": ns, "issue": "Pod is not running."})
    return health_issues
def check_deployment_health(handle, core_services, namespace):
    """Flag deployments whose readyReplicas does not match replicas."""
    health_issues = []
    not_ready_jsonpath = "-o=jsonpath='{.items[?(@.status.readyReplicas!=@.status.replicas)].metadata.name}'"

    def _record_not_ready(names, ns):
        # One issue entry per deployment name in the space-separated output.
        for deployment_name in names.split():
            health_issues.append({"type": "Deployment", "name": deployment_name, "namespace": ns, "issue": "Deployment has replicas mismatch or is not available/progressing."})

    target_namespaces = [namespace] if namespace else get_namespaces(handle)
    for ns in target_namespaces:
        if not core_services:
            # Check all deployments in the namespace if no specific services are given
            names = execute_kubectl_command(handle, f"kubectl get deployments -n {ns} {not_ready_jsonpath}")
            if names:
                _record_not_ready(names, ns)
            continue
        for service in core_services:
            label_selector = get_label_selector_for_service(handle, ns, service)
            if not label_selector:
                print(f"Service {service} not found or has no selectors in namespace {ns}. Skipping...")
                continue
            names = execute_kubectl_command(handle, f"kubectl get deployments -n {ns} -l {label_selector} {not_ready_jsonpath}")
            if names:
                _record_not_ready(names, ns)
    return health_issues
def k8s_get_cluster_health(handle, core_services: Optional[list] = None, namespace: str = "") -> Tuple:
    """Aggregate node, pod and deployment health checks for the cluster.

    :param handle: unSkript K8S connector handle
    :param core_services: Optional list of service names to scope the pod and
        deployment checks; None/empty checks everything
    :param namespace: Namespace to scope the checks; empty checks all
    :return: (True, None) when healthy, else (False, list of issue dicts)
    """
    # Avoid the shared mutable-default-argument pitfall; normalize to a list.
    if core_services is None:
        core_services = []
    node_api = client.CoreV1Api(api_client=handle)
    health_issues = (check_node_health(node_api)
                     + check_pod_health(handle, core_services, namespace)
                     + check_deployment_health(handle, core_services, namespace))
    if health_issues:
        return (False, health_issues)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_config_map_kube_system/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_config_map_kube_system/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_config_map_kube_system/k8s_get_config_map_kube_system.json
================================================
{
"action_title": "Get k8s kube system config map",
"action_description": "Get k8s kube system config map",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_config_map_kube_system",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories":[ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S"]
}
================================================
FILE: Kubernetes/legos/k8s_get_config_map_kube_system/k8s_get_config_map_kube_system.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from typing import Optional, List
from pydantic import BaseModel, Field
from tabulate import tabulate
from unskript.legos.kubernetes.k8s_kubectl_command.k8s_kubectl_command import k8s_kubectl_command
from kubernetes import client
class InputSchema(BaseModel):
    # Namespace to search; empty string searches every namespace.
    namespace: Optional[str] = Field(
        default="",
        title='Namespace',
        description='Kubernetes namespace')
    # ConfigMap name to filter on; empty string returns all ConfigMaps.
    config_map_name: str = Field(
        default="",
        title='Config Map',
        description='Kubernetes Config Map Name')
def k8s_get_config_map_kube_system_printer(output):
    """Print each ConfigMap's metadata and data as a small github-style table.

    :param output: List of dicts with 'namespace' and 'details' keys, as
        returned by k8s_get_config_map_kube_system; None prints nothing.
    """
    if output is None:
        return
    for entry in output:
        for key, value in entry.items():
            if key != 'details':
                continue
            for config in value:
                # Build the label/value rows in one literal instead of five
                # copy-pasted data_set_N blocks.
                rows = [
                    ["Name:", config.metadata.name],
                    ["Namespace:", config.metadata.namespace],
                    ["Labels:", config.metadata.labels],
                    ["Annotations:", config.metadata.annotations],
                    ["Data:", config.data],
                ]
                print(tabulate(rows, tablefmt="github"))
def k8s_get_config_map_kube_system(handle, config_map_name: str = '', namespace: str = '') -> List:
    """k8s_get_config_map_kube_system get kube system config map

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type config_map_name: str
    :param config_map_name: Kubernetes Config Map Name.
    :type namespace: str
    :param namespace: Kubernetes namespace.
    :rtype: List of system kube config maps for a given namespace
    """
    # Resolve the namespaces to inspect: the given one, or every namespace.
    if namespace:
        target_namespaces = [namespace]
    else:
        ns_output = k8s_kubectl_command(
            handle=handle,
            kubectl_command="kubectl get ns --no-headers -o custom-columns=':metadata.name'")
        target_namespaces = ns_output.replace("\n", " ").strip().split(" ")
    core_api = client.CoreV1Api(api_client=handle)
    result = []
    for ns in target_namespaces:
        res = core_api.list_namespaced_config_map(namespace=ns, pretty=True)
        if not res.items:
            continue
        if config_map_name:
            config_maps = [cm for cm in res.items if cm.metadata.name == config_map_name]
        else:
            config_maps = res.items
        result.append({"namespace": ns, "details": config_maps})
    return result
================================================
FILE: Kubernetes/legos/k8s_get_deployment/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_deployment_status/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_deployment_status/k8s_get_deployment_status.json
================================================
{
"action_title": "Get Deployment Status",
"action_description": "This action search for failed deployment status and returns list.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_deployment_status",
"action_is_check": true,
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ,"CATEGORY_TYPE_K8S"],
"action_next_hop": ["65afc892db3d7ef487fe2353282bf94351e4674a34f56cd0349a2ad920897ddd"],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_deployment_status/k8s_get_deployment_status.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
import pprint
import json
from typing import Optional, Tuple
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Namespace to inspect; empty string means all namespaces.
    namespace: Optional[str] = Field(
        default='',
        title='Namespace',
        description='k8s Namespace')
    # Deployment name to inspect; empty string means all deployments.
    deployment: Optional[str] = Field(
        default='',
        title='Deployment',
        description='k8s Deployment')
def k8s_get_deployment_status_printer(output):
    """Pretty-print the (status, failed-deployments) tuple."""
    if output is not None:
        pprint.pprint(output)
def k8s_get_deployment_status(handle, deployment: str = "", namespace: str = "") -> Tuple:
    """k8s_get_deployment_status executes the command and give failed deployment list

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type deployment: str
    :param deployment: Deployment Name.
    :type namespace: str
    :param namespace: Kubernetes Namespace.
    :rtype: Tuple with status result and list of failed deployments.
    """
    def _build_command() -> str:
        # Choose the kubectl invocation matching the provided filters.
        if namespace and deployment:
            return "kubectl get deployment " + deployment + " -n " + namespace + " -o json"
        if namespace:
            return "kubectl get deployment -n " + namespace + " -o json"
        if deployment:
            return "kubectl get deployment " + deployment + " -o json"
        return "kubectl get deployments --all-namespaces -o json"

    def _collect_failures(detail, failures):
        # Record one entry per condition that indicates a replica-create
        # failure or a progress-deadline breach. .get() guards against
        # conditions that lack 'reason'/'type'/'status' keys.
        namespace_name = detail["metadata"]["namespace"]
        deployment_name = detail["metadata"]["name"]
        for condition in detail["status"].get("conditions", []):
            reason = condition.get("reason") or ""
            cond_type = condition.get("type") or ""
            cond_status = condition.get("status") or ""
            if (("FailedCreate" in reason and "ReplicaFailure" in cond_type and "True" in cond_status) or
                    ("ProgressDeadlineExceeded" in reason and "Progressing" in cond_type and "False" in cond_status)):
                failures.append({"namespace": namespace_name, "deployment_name": deployment_name})

    if handle.client_side_validation is not True:
        print(f"K8S Connector is invalid: {handle}")
        raise Exception("K8S Connector is invalid")

    exec_cmd = handle.run_native_cmd(_build_command())
    status_details = json.loads(exec_cmd.stdout)

    result = []
    if status_details:
        if "items" in status_details:
            # List responses wrap each deployment under 'items'.
            for item in status_details["items"]:
                _collect_failures(item, result)
        else:
            # Single-deployment responses are the deployment object itself.
            _collect_failures(status_details, result)
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_error_pods_from_all_jobs/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_error_pods_from_all_jobs/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_error_pods_from_all_jobs/k8s_get_error_pods_from_all_jobs.json
================================================
{
"action_title": "Get Kubernetes Error PODs from All Jobs",
"action_description": "Get Kubernetes Error PODs from All Jobs",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_error_pods_from_all_jobs",
"action_is_check": true,
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_TROUBLESHOOTING","CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD" ],
"action_next_hop": ["88e97c46ad944d2f0541cd1f87e3ec5b8a4619f6093e89b55cec53b2a47e45aa"],
"action_next_hop_parameter_mapping": {"88e97c46ad944d2f0541cd1f87e3ec5b8a4619f6093e89b55cec53b2a47e45aa": {"name": "IP Exhaustion Mitigation: Failing K8s Pod Deletion from Jobs","namespace":".[0].namespace","pod_names":"map(.pod_name)"}}
}
================================================
FILE: Kubernetes/legos/k8s_get_error_pods_from_all_jobs/k8s_get_error_pods_from_all_jobs.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
import pprint
from typing import Tuple, Optional
from pydantic import BaseModel, Field
import json
class InputSchema(BaseModel):
    # Namespace whose jobs are scanned; empty string means all namespaces.
    namespace: Optional[str] = Field(
        default='',
        title='Namespace',
        description='k8s Namespace')
def k8s_get_error_pods_from_all_jobs_printer(output):
    """Pretty-print the (status, error-pod-list) tuple."""
    if output is not None:
        pprint.pprint(output)
def k8s_get_error_pods_from_all_jobs(handle, namespace:str="") -> Tuple:
    """k8s_get_error_pods_from_all_jobs This check function uses the handle's native command
    method to execute a pre-defined kubectl command and returns the output of list of error pods
    from all jobs.

    :type handle: Object
    :param handle: Object returned from the task.validate(...) function
    :type namespace: str
    :param namespace: Namespace whose jobs are scanned; empty scans all namespaces
    :rtype: Tuple Result in tuple format.
    """
    result = []
    # Fetch jobs for a particular namespace or if not given all namespaces
    ns_cmd = f"-n {namespace}" if namespace else "--all-namespaces"
    kubectl_cmd = f"kubectl get jobs {ns_cmd} -o json"
    response = handle.run_native_cmd(kubectl_cmd)
    if response.stderr:
        raise Exception(f"Error occurred while executing command {kubectl_cmd}: {response.stderr}")
    jobs = {}
    try:
        if response.stdout:
            jobs = json.loads(response.stdout)
    except json.JSONDecodeError:
        raise Exception("Failed to parse JSON output from kubectl command.")
    for job in jobs.get("items", []):
        job_name = job["metadata"]["name"]
        job_namespace = job["metadata"]["namespace"]
        # Fetch pods for each job
        pod_kubectl_cmd = f"kubectl get pods -n {job_namespace} -l job-name={job_name} -o json"
        pod_response = handle.run_native_cmd(pod_kubectl_cmd)
        if pod_response.stderr:
            print(f"Error occurred while fetching pods for job {job_name}: {pod_response.stderr}")
            continue
        pods = {}
        try:
            # Bug fix: inspect the pod listing's own stdout (this previously
            # checked the jobs response's stdout by mistake).
            if pod_response.stdout:
                pods = json.loads(pod_response.stdout)
        except json.JSONDecodeError:
            print(f"Failed to parse JSON pod response output for kubectl command: {pod_kubectl_cmd}")
            continue
        for pod in pods.get("items", []):
            # Anything not Succeeded/Running is treated as an error pod.
            if pod["status"]["phase"] not in ["Succeeded", "Running"]:
                result.append({"pod_name": pod["metadata"]["name"],
                               "job_name": job_name,
                               "namespace": pod["metadata"]["namespace"]
                               })
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_expiring_cluster_certificate/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_expiring_cluster_certificate/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_expiring_cluster_certificate/k8s_get_expiring_cluster_certificate.json
================================================
{
"action_title": "Check expiry of K8s cluster certificate",
"action_description": "Check expiry of K8s cluster certificate",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_expiring_cluster_certificate",
"action_is_check": true,
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ,"CATEGORY_TYPE_K8S"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_expiring_cluster_certificate/k8s_get_expiring_cluster_certificate.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
import base64
import datetime
from cryptography import x509
from cryptography.hazmat.backends import default_backend
class InputSchema(BaseModel):
    # Certificates expiring within this many days are flagged.
    expiring_threshold: Optional[int] = Field(
        default=7,
        # Fix stale help text: the actual default is 7 days, not 90.
        title='Expiration Threshold (in days)',
        description='Expiration Threshold of certificates (in days). Default- 7 days')
def k8s_get_expiring_cluster_certificate_printer(output):
    """Print the certificate expiry check result in human-readable form."""
    if output is None:
        return
    success, data = output
    if success:
        print("K8s certificate is valid.")
    else:
        print(data)
def get_expiry_date(pem_data: str) -> datetime.datetime:
    """Parse a PEM-encoded X.509 certificate and return its notAfter expiry.

    :param pem_data: Certificate in PEM text form.
    :return: Naive datetime of the certificate's notAfter field.
        NOTE(review): `not_valid_after` is deprecated in newer `cryptography`
        releases in favor of `not_valid_after_utc` — confirm library version.
    """
    cert = x509.load_pem_x509_certificate(pem_data.encode(), default_backend())
    return cert.not_valid_after
def k8s_get_expiring_cluster_certificate(handle, expiring_threshold:int=7) -> Tuple:
    """
    Check the validity for a K8s cluster certificate.

    Args:
        handle: Object of type unSkript K8S Connector
        expiring_threshold (int): The threshold (in days) for considering a
            certificate as expiring soon. Defaults to 7 days.

    Returns:
        tuple: (True, None) when the certificate is valid and not expiring
        soon, else (False, list of detail dicts).
    """
    result = []
    try:
        # Fetch cluster CA certificate
        ca_cert = handle.run_native_cmd("kubectl get secret -o jsonpath=\"{.items[?(@.type=='kubernetes.io/service-account-token')].data['ca\\.crt']}\" --all-namespaces")
        if ca_cert.stderr:
            raise Exception(f"Error occurred while fetching cluster CA certificate: {ca_cert.stderr}")
        # Decode and check expiry date of the cluster's CA certificate
        ca_cert_decoded = base64.b64decode(ca_cert.stdout.strip()).decode("utf-8")
        ca_cert_exp = get_expiry_date(ca_cert_decoded)
        # NOTE(review): not_valid_after is timezone-naive (UTC) while
        # datetime.now() is local time — confirm the skew is acceptable.
        days_remaining = (ca_cert_exp - datetime.datetime.now()).days
        if days_remaining < 0:
            # Certificate has already expired
            result.append({
                "certificate": "Kubeconfig Cluster certificate",
                "days_remaining": days_remaining,
                "status": "Expired"
            })
        elif ca_cert_exp < datetime.datetime.now() + datetime.timedelta(days=expiring_threshold):
            # Still valid, but expires within the configured threshold.
            result.append({
                "certificate": "Kubeconfig Cluster certificate",
                "days_remaining": days_remaining,
                "status": "Expiring Soon"
            })
    except Exception as e:
        print(f"Error occurred while checking cluster CA certificate: {e}")
        raise e
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_expiring_tls_secret_certificates/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_expiring_tls_secret_certificates/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_expiring_tls_secret_certificates/k8s_get_expiring_tls_secret_certificates.json
================================================
{
"action_title": "Get expiring secret certificates",
"action_description": "Get the expiring secret certificates for a K8s cluster.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_expiring_tls_secret_certificates",
"action_is_check": true,
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ,"CATEGORY_TYPE_K8S"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_expiring_tls_secret_certificates/k8s_get_expiring_tls_secret_certificates.py
================================================
##
# Copyright (c) 2024 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from typing import Optional, Tuple
import base64
import datetime
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from kubernetes import client, watch
from kubernetes.client.rest import ApiException
from tabulate import tabulate
class InputSchema(BaseModel):
    # Optional namespace filter; empty string means scan every namespace.
    namespace: Optional[str] = Field(
        default='',
        title='Namespace',
        description='K8s Namespace. Default- all namespaces')
    # Certificates expiring within this many days are flagged.
    # Fixed: description previously said "90 days" but the default is 7.
    expiring_threshold: Optional[int] = Field(
        default=7,
        title='Expiration Threshold (in days)',
        description='Expiration Threshold of certificates (in days). Default- 7 days')
def k8s_get_expiring_tls_secret_certificates_printer(output):
    """Render the check output: a grid of expiring/expired secrets, or an all-clear message."""
    if output is None:
        return
    ok, findings = output
    if ok:
        print("No expiring certificates found.")
        return
    rows = []
    for finding in findings:
        rows.append([finding['secret_name'], finding['namespace']])
    print(tabulate(rows, headers=['Secret Name', 'Namespace'], tablefmt='grid'))
def get_expiry_date(pem_data: str) -> datetime.datetime:
    """Parse a PEM-encoded X.509 certificate and return its notAfter timestamp (naive datetime)."""
    parsed_cert = x509.load_pem_x509_certificate(pem_data.encode(), default_backend())
    return parsed_cert.not_valid_after
def k8s_get_expiring_tls_secret_certificates(handle, namespace:str='', expiring_threshold:int=7) -> Tuple:
    """
    Get the expiring TLS secret certificates for a K8s cluster.

    Scans `kubernetes.io/tls` secrets (in one namespace, or all of them) and flags
    certificates that are already expired or expire within `expiring_threshold` days.

    Args:
        handle: Object of type unSkript K8S Connector
        namespace (str): The Kubernetes namespace where the certificates are stored.
                         Empty string scans all namespaces.
        expiring_threshold (int): The threshold (in days) for considering a
                                  certificate as expiring soon. Defaults to 7.

    Returns:
        tuple: (True, None) when nothing is expiring, otherwise
               (False, list of dicts with secret_name/namespace/days_remaining/status).
               NOTE: an API listing error also yields (False, None); consumers
               should treat data=None as "no findings to display".
    """
    result = []
    coreApiClient = client.CoreV1Api(api_client=handle)
    try:
        if namespace:
            # Probe the namespace first: an empty namespace short-circuits the scan.
            secrets = coreApiClient.list_namespaced_secret(namespace, watch=False, limit=1).items
            if not secrets:
                return (True, None)  # No secrets in the namespace
            all_namespaces = [namespace]
        else:
            all_namespaces = [ns.metadata.name for ns in coreApiClient.list_namespace().items]
    except ApiException as e:
        print(f"Error occurred while accessing Kubernetes API: {e}")
        return False, None
    now = datetime.datetime.now()
    for n in all_namespaces:
        secrets = coreApiClient.list_namespaced_secret(n, watch=False, limit=200).items
        for secret in secrets:
            # Only TLS-type secrets carry the certificates of interest.
            if secret.type != "kubernetes.io/tls":
                continue
            cert_data = secret.data.get("tls.crt")
            if not cert_data:
                continue
            # Secret data values are base64-encoded PEM.
            cert_data_decoded = base64.b64decode(cert_data).decode("utf-8")
            cert_exp = get_expiry_date(cert_data_decoded)
            days_remaining = (cert_exp - now).days
            if days_remaining < 0:
                # Certificate has already expired.
                result.append({
                    "secret_name": secret.metadata.name,
                    "namespace": n,
                    "days_remaining": days_remaining,
                    "status": "Expired"
                })
            elif cert_exp < now + datetime.timedelta(days=expiring_threshold):
                # cert_exp is always a datetime here, so the previous
                # `cert_exp and ...` truthiness guard was redundant.
                result.append({
                    "secret_name": secret.metadata.name,
                    "namespace": n,
                    "days_remaining": days_remaining,
                    "status": "Expiring Soon"  # Close to expiring, not yet expired
                })
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_failed_deployments/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_failed_deployments/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_failed_deployments/k8s_get_failed_deployments.json
================================================
{
"action_title": "Get Kubernetes Failed Deployments",
"action_description": "Get Kubernetes Failed Deployments",
"action_type": "LEGO_TYPE_K8S",
"action_is_check": true,
"action_entry_function": "k8s_get_failed_deployments",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S"],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_failed_deployments/k8s_get_failed_deployments.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
import json
from typing import Tuple
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Namespace to restrict the search to; empty string means all namespaces.
    namespace: str = Field(
        '',
        description="K8S Namespace",
        title="K8S Namespace"
    )
def k8s_get_failed_deployments_printer(output):
    """Print the raw (status, data) tuple produced by k8s_get_failed_deployments."""
    if output is not None:
        print(output)
def k8s_get_failed_deployments(handle, namespace: str = '') -> Tuple:
    """k8s_get_failed_deployments Returns all failed deployments across all namespaces
       or within a specific namespace if provided. A deployment is considered
       failed if its 'Available' condition is set to 'False'.

       :type handle: Object
       :param handle: Object returned from task.validate(...) function

       :type namespace: str
       :param namespace: The specific namespace to filter the deployments. Defaults to ''.

       :rtype: Tuple of (status, data): (True, None) when no failures found, else
               (False, list of {'name', 'namespace'} dicts, one per failed deployment).
       :raises Exception: when the kubectl command produces no response or writes to stderr.
    """
    # Scope the kubectl query to a single namespace when one is requested.
    kubectl_command = "kubectl get deployments --all-namespaces -o json"
    if namespace:
        kubectl_command = "kubectl get deployments -n " + namespace + " -o json"
    response = handle.run_native_cmd(kubectl_command)
    # A None response means the command never ran. Fail loudly here: the
    # previous code printed a message and then dereferenced response.stderr,
    # crashing with AttributeError.
    if response is None:
        raise Exception(f"Error while executing command ({kubectl_command}) (empty response)")
    if response.stderr:
        raise Exception(f"Error occurred while executing command {kubectl_command} {response.stderr}")
    result = []
    deployments = json.loads(response.stdout)
    for item in deployments.get("items", []):
        # A freshly-created deployment may have no conditions yet; .get keeps
        # that case from raising KeyError and simply skips it.
        for condition in item.get("status", {}).get("conditions", []):
            # The 'Available' condition set to 'False' marks a failed deployment.
            if condition["type"] == "Available" and condition["status"] == "False":
                result.append({
                    'name': item["metadata"]["name"],
                    'namespace': item["metadata"]["namespace"]
                })
    return (False, result) if result else (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_frequently_restarting_pods/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_frequently_restarting_pods/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_frequently_restarting_pods/k8s_get_frequently_restarting_pods.json
================================================
{
"action_title": "Get frequently restarting K8s pods",
"action_description": "Get Kubernetes pods from all namespaces that are restarting too often.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_frequently_restarting_pods",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ,"CATEGORY_TYPE_K8S"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_frequently_restarting_pods/k8s_get_frequently_restarting_pods.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import json
from typing import Optional, Tuple
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Pods that have restarted more than this many times are reported.
    restart_threshold: Optional[int] = Field(
        default = 90,
        description='Threshold number of times for which a pod should be restarting. Default is 90 times.',
        title='Restart threshold',
    )
def k8s_get_frequently_restarting_pods_printer(output):
    """Print the raw (status, data) tuple produced by k8s_get_frequently_restarting_pods."""
    if output is not None:
        print(output)
def k8s_get_frequently_restarting_pods(handle, restart_threshold:int=90) -> Tuple:
    """k8s_get_frequently_restarting_pods finds any K8s pods that have restarted more
       number of times than a given threshold

       :type handle: object
       :param handle: Object returned from the Task validate method

       :type restart_threshold: int
       :param restart_threshold: Threshold number of times for which a pod should be restarting

       :rtype: Tuple: (True, None) when no pod exceeds the threshold, else
               (False, list of {'pod', 'namespace'} dicts).
       :raises Exception: when the kubectl command produces no response or writes to stderr.
    """
    result = []
    cmd = "kubectl get pods --all-namespaces --sort-by='.status.containerStatuses[0].restartCount' -o custom-columns='NAMESPACE:.metadata.namespace,NAME:.metadata.name,RESTART_COUNT:.status.containerStatuses[0].restartCount' -o json"
    response = handle.run_native_cmd(cmd)
    # A None response means the command never ran. Fail loudly here: the
    # previous code printed a message and then dereferenced response.stderr,
    # crashing with AttributeError.
    if response is None:
        raise Exception(f"Error while executing command ({cmd}) (empty response)")
    if response.stderr:
        raise Exception(
            f"Error occurred while executing command {cmd} {response.stderr}")
    all_pods_data = json.loads(response.stdout)
    for pod_data in all_pods_data['items']:
        pod = pod_data['metadata']['name']
        nmspace = pod_data['metadata']['namespace']
        # Pending pods may not have containerStatuses yet; skip those.
        if 'containerStatuses' in pod_data['status'] and pod_data['status']['containerStatuses']:
            # Only the first container's restart count is considered, matching
            # the sort key used in the kubectl command above.
            restart_count = pod_data['status']['containerStatuses'][0]['restartCount']
            if restart_count > restart_threshold:
                result.append({
                    'pod': pod,
                    'namespace': nmspace
                })
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_handle/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_handle/k8s_get_handle.json
================================================
{
"action_title": "Get Kubernetes Handle",
"action_description": "Get Kubernetes Handle",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false
}
================================================
FILE: Kubernetes/legos/k8s_get_handle/k8s_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    # This action takes no inputs.
    pass
def k8s_get_handle(handle):
    """k8s_get_handle returns the kubernetes handle passed in, unchanged.

       :rtype: kubernetes Handle.
    """
    return handle
================================================
FILE: Kubernetes/legos/k8s_get_healthy_pods/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_healthy_pods/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_healthy_pods/k8s_get_healthy_pods.json
================================================
{
"action_title": "Get All Kubernetes Healthy PODS in a given Namespace",
"action_description": "Get All Kubernetes Healthy PODS in a given Namespace",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_healthy_pods",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ,"CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD" ]
}
================================================
FILE: Kubernetes/legos/k8s_get_healthy_pods/k8s_get_healthy_pods.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from typing import List
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # Namespace whose pods are inspected; must exist in the cluster.
    namespace: str = Field(
        title='Namespace',
        description='Kubernetes namespace')
def k8s_get_healthy_pods_printer(data: list):
    """Print the healthy pod names, one per indented line."""
    if data is None:
        return
    print("POD List:")
    for name in data:
        print(f"\t {name}")
def k8s_get_healthy_pods(handle, namespace: str) -> List:
    """k8s_get_healthy_pods get healthy pods

       :type handle: object
       :param handle: Object returned from the Task validate method

       :type namespace: str
       :param namespace: Kubernetes namespace.

       :rtype: List of names of pods in the Running or Succeeded phase
    """
    coreApiClient = client.CoreV1Api(api_client=handle)
    # Verify the namespace exists first so a bad namespace surfaces as a clear API error.
    try:
        coreApiClient.read_namespace_status(namespace, pretty=True)
    except ApiException as e:
        raise e
    pod_list = coreApiClient.list_namespaced_pod(namespace=namespace)
    # Pods in Running or Succeeded phase are considered healthy.
    return [pod.metadata.name
            for pod in pod_list.items
            if pod.status.phase in ("Running", "Succeeded")]
================================================
FILE: Kubernetes/legos/k8s_get_memory_utilization_of_services/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_memory_utilization_of_services/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_memory_utilization_of_services/k8s_get_memory_utilization_of_services.json
================================================
{
"action_title": "Get K8s services exceeding memory utilization",
"action_description": "This action executes the given kubectl commands to find the memory utilization of the specified services in a particular namespace and compares it with a given threshold.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_memory_utilization_of_services",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Kubernetes/legos/k8s_get_memory_utilization_of_services/k8s_get_memory_utilization_of_services.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
import os
import json
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    # Pod/service names to restrict the check to; requires `namespace` to be set.
    services: list = Field(
        description='List of pod names of the services for which memory utilization is to be fetched.',
        title='List of pod names (as services)',
    )
    # Namespace the services run in; the action falls back to 'default' when empty.
    namespace: str = Field(
        description='Namespace in which the services are running.',
        title='K8s Namespace',
    )
    # Memory usage above this percentage of the container's memory request is flagged.
    threshold: Optional[float] = Field(
        80,
        description='Threshold for memory utilization percentage. Default is 80%.',
        title='Threshold (in %)',
    )
def k8s_get_memory_utilization_of_services_printer(output):
    """Render the check result: an all-clear line, or a grid of offending containers."""
    status, data = output
    if status:
        print("All services are within memory utilization threshold")
        return
    headers = ["Service", "Pod", "Namespace", "Container", "Utilization %"]
    rows = [
        [
            entry.get('service', "N/A"),
            entry.get('pod', "N/A"),
            entry.get('namespace', "N/A"),
            entry.get('container_name', "N/A"),
            entry.get('utilization_percentage', "N/A"),
        ]
        for entry in data
    ]
    # tabulate renders the findings as a grid table.
    print(tabulate(rows, headers=headers, tablefmt="grid"))
def convert_memory_to_bytes(memory_value) -> int:
    """Convert a Kubernetes memory quantity string (e.g. '128Mi', '2G', '1.5Gi')
    to bytes.

    Supports decimal suffixes (K/M/G/T/P) and binary suffixes (Ki/Mi/Gi/Ti/Pi/Ei),
    plain numeric strings, and scientific notation such as '129e6'. The previous
    implementation used int() directly and crashed on fractional quantities like
    '1.5Gi', which kubectl legitimately reports.

    :param memory_value: quantity string as reported by kubectl, or ''/None.
    :return: the value in bytes, truncated to an int; 0 for empty/falsy input.
    """
    if not memory_value:
        return 0
    units = {
        'K': 1000,
        'M': 1000 ** 2,
        'G': 1000 ** 3,
        'T': 1000 ** 4,
        'P': 1000 ** 5,
        # 'E' (decimal exa) is deliberately omitted: it is ambiguous with the
        # exponent marker in quantities like '1E6'.
        'Ki': 1024,
        'Mi': 1024 ** 2,
        'Gi': 1024 ** 3,
        'Ti': 1024 ** 4,
        'Pi': 1024 ** 5,
        'Ei': 1024 ** 6,
    }
    for unit, multiplier in units.items():
        if memory_value.endswith(unit):
            # float() accepts fractional quantities such as '1.5Gi'.
            return int(float(memory_value[:-len(unit)]) * multiplier)
    # No recognized suffix: plain number, possibly in scientific notation.
    return int(float(memory_value))
def k8s_get_memory_utilization_of_services(handle, namespace: str = "", threshold:float=80, services: list=[]) -> Tuple:
    """
    k8s_get_memory_utilization_of_services executes the given kubectl commands
    to find the memory utilization of the specified services in a particular namespace
    and compares it with a given threshold.

    :param handle: Object returned from the Task validate method, must have client-side validation enabled.
    :param namespace: Namespace in which the services are running. Falls back to 'default' when empty.
    :param threshold: Threshold for memory utilization percentage. Default is 80%.
    :param services: List of pod names of the services for which memory utilization is to be fetched.
                     NOTE(review): mutable default argument; safe today because the
                     list is only read, never mutated — keep it that way.
    :return: Status, list of exceeding services if any service has exceeded the threshold:
             (True, None) when nothing exceeds it, (False, list of dicts) otherwise.
    :raises Exception: when the connector fails client-side validation.
    :raises ValueError: when services are given without a namespace.
    """
    if handle.client_side_validation is False:
        raise Exception(f"K8S Connector is invalid: {handle}")
    if services and not namespace:
        raise ValueError("Namespace must be provided if services are specified.")
    if not namespace:
        namespace = 'default'
    exceeding_services = []
    # Main Idea:
    # 1. Given namespace, lets get current memory utilization for top pods
    # 2. Filter the list of pods to check from the service list
    # 3. For the pods get the memory request
    # 4. Calculate utilization as (mem_usage / mem_request) * 100
    # 5. Construct list of pods which has Utilization > threshold and return the list
    try:
        top_pods_command = f"kubectl top pods -n {namespace} --containers --no-headers"
        response = handle.run_native_cmd(top_pods_command)
        top_pods_output = response.stdout.strip()
        if not top_pods_output:
            return (True, None)
        service_pods_containers = {} # Dictionary to hold pod and container names for each service
        if services:
            # If services specified, lets iterate over it and get pods corresponding to them.
            # If service pod not found in the top pod list, which means the memory
            # utilization is not significant, so dont need to check
            for svc in services:
                # Resolve the service's label selector so its pods can be found.
                kubectl_cmd = f"kubectl get service {svc} -n {namespace} -o=jsonpath={{.spec.selector}}"
                response = handle.run_native_cmd(kubectl_cmd)
                svc_labels = None
                if response.stderr:
                    print(f"Error occurred while executing command {kubectl_cmd}: {response.stderr}")
                    continue
                try:
                    if response.stdout.strip():
                        svc_labels = json.loads(response.stdout.strip())
                except:
                    # If json.loads returns error, which means the output of the kubectl command returned invalid output.
                    # since there is invalid output, no service label output. the next if check should return back
                    pass
                if not svc_labels:
                    continue
                _labels = ", ".join([f"{key}={value}" for key, value in svc_labels.items()])
                svc_pod_cmd = f"kubectl get pods -n {namespace} -l \"{_labels}\" -o=jsonpath={{.items[*].metadata.name}}"
                response = handle.run_native_cmd(svc_pod_cmd)
                svc_pods = response.stdout.strip()
                if not svc_pods:
                    # No pods attached to the given service
                    continue
                # For each pod, fetch containers and their memory usage
                for svc_pod in svc_pods.split():
                    # NOTE(review): substring match against the `kubectl top` line —
                    # a pod name that is a prefix of another could match both lines;
                    # confirm pod names are unique enough in practice.
                    for line in top_pods_output.split('\n'):
                        if svc_pod in line:
                            parts = line.split()
                            if len(parts) >= 3: # Ensure line has enough parts to parse
                                container_name = parts[1]
                                mem_usage = parts[-1]
                            else:
                                print(f"Incorrect top pods output for pod:{svc_pod} namespace: {namespace}.")
                                continue
                            # Key: Service, Pod, Container; Value: Memory Usage
                            service_pods_containers[(svc, svc_pod, container_name)] = mem_usage
        else:
            # No services given: take every container row from `kubectl top pods`.
            for line in top_pods_output.split('\n'):
                parts = line.split()
                if len(parts) >= 3:
                    pod_name, container_name, mem_usage = parts[0], parts[1], parts[-1]
                else:
                    print(f"Incorrect top pods output for namespace: {namespace}.")
                    continue
                # Key: Service: None, Pod, Container; Value: Memory Usage (when services are not specified)
                service_pods_containers[(None, pod_name, container_name)] = mem_usage
        # Now, for each service's pod and container, fetch memory request and calculate utilization
        for (service_key, pod, container), mem_usage in service_pods_containers.items():
            # Check if the service name exists or use a placeholder
            service_name = service_key if service_key else "N/A"
            # Every pod has at least one container; the container name captured
            # above selects the matching spec entry to read its memory request.
            kubectl_command = f"kubectl get pod {pod} -n {namespace} -o=jsonpath='{{.spec.containers[?(@.name==\"{container}\")].resources.requests.memory}}'"
            response = handle.run_native_cmd(kubectl_command)
            mem_request = response.stdout.strip()
            if not mem_request:
                # Memory limit is not set, dont calculate utilization
                continue
            mem_request_bytes = convert_memory_to_bytes(mem_request)
            mem_usage_bytes = convert_memory_to_bytes(mem_usage)
            if mem_request_bytes > 0:
                utilization = (mem_usage_bytes / mem_request_bytes) * 100
                utilization = round(utilization, 2)
                if utilization > threshold:
                    exceeding_services.append({
                        "service": service_name,
                        "pod": pod,
                        "container_name": container,
                        "namespace": namespace,
                        "utilization_percentage": utilization,
                        "memory_request_bytes": mem_request_bytes,
                        "memory_usage_bytes": mem_usage_bytes,
                    })
            else:
                print(f"Memory request for pod: {pod}, container: {container} is 0 or not set. Skipping...")
                continue
    except Exception as e:
        # Bare re-raise: no extra context is added here.
        raise e
    return (False, exceeding_services) if exceeding_services else (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_node_status_and_resource_utilization/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_node_status_and_resource_utilization/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_node_status_and_resource_utilization/k8s_get_node_status_and_resource_utilization.json
================================================
{
"action_title": "Get K8s node status and CPU utilization",
"action_description": "This action gathers Kubernetes node status and resource utilization information.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_node_status_and_resource_utilization",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": ["CATEGORY_TYPE_INFORMATION" , "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_NODE" ]
}
================================================
FILE: Kubernetes/legos/k8s_get_node_status_and_resource_utilization/k8s_get_node_status_and_resource_utilization.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from typing import List
from tabulate import tabulate
import json
from kubernetes.client.rest import ApiException
from pydantic import BaseModel
class InputSchema(BaseModel):
    # This action takes no inputs.
    pass
def k8s_get_node_status_and_resource_utilization_printer(output):
    """Pretty-print the node rows, or a placeholder when there is nothing to show."""
    if not output:
        print("No Data to Display")
        return
    headers = ['Node Name', 'Status', 'CPU Usage (%)', 'Memory Usage (%)']
    print(tabulate(output, headers, tablefmt='pretty'))
def k8s_get_node_status_and_resource_utilization(handle) -> List:
    """Collect each node's latest status condition and CPU/memory utilization.

    Combines `kubectl get nodes -o json` (status) with `kubectl top nodes`
    (utilization), matching the two outputs by node name. The previous
    implementation zipped the two listings by position, which silently
    mispairs rows when the orderings differ or a blank line appears.

    :param handle: Object returned from the Task validate method
    :return: List of [node_name, status, cpu_percent, memory_percent] rows.
    :raises ApiException: when either kubectl command writes to stderr.
    """
    if handle.client_side_validation is not True:
        print(f"K8S Connector is invalid: {handle}")
        return []
    # Fetch per-node utilization; each line looks like:
    #   <name> <cpu-cores> <cpu%> <mem-bytes> <mem%>
    node_utilization_cmd = "kubectl top nodes --no-headers"
    node_utilization = handle.run_native_cmd(node_utilization_cmd)
    if node_utilization.stderr:
        raise ApiException(f"Error occurred while executing command {node_utilization_cmd} {node_utilization.stderr}")
    # Index utilization by node name so ordering differences between the two
    # kubectl commands cannot mispair rows; short/blank lines are skipped.
    utilization_by_node = {}
    for line in node_utilization.stdout.split('\n'):
        parts = line.split()
        if len(parts) >= 5:
            utilization_by_node[parts[0]] = (parts[2].rstrip('%'), parts[4].rstrip('%'))
    # Command to fetch node status
    node_status_cmd = "kubectl get nodes -o json"
    node_status = handle.run_native_cmd(node_status_cmd)
    if node_status.stderr:
        raise ApiException(f"Error occurred while executing command {node_status_cmd} {node_status.stderr}")
    nodes_info = json.loads(node_status.stdout)
    data = []
    for item in nodes_info['items']:
        node_name = item['metadata']['name']
        # NOTE(review): the LAST condition's type is reported as the status;
        # for a healthy node this is typically "Ready" (the condition's
        # True/False value is not inspected) — confirm intended semantics.
        status = item['status']['conditions'][-1]['type']
        cpu_usage_percent, memory_usage_percent = utilization_by_node.get(node_name, ("N/A", "N/A"))
        data.append([node_name, status, cpu_usage_percent, memory_usage_percent])
    return data
================================================
FILE: Kubernetes/legos/k8s_get_nodes/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_nodes/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_nodes/k8s_get_nodes.json
================================================
{
"action_title": "Get Kubernetes Nodes",
"action_description": "Get Kubernetes Nodes",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_nodes",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_NODE" ]
}
================================================
FILE: Kubernetes/legos/k8s_get_nodes/k8s_get_nodes.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
import datetime
from typing import Tuple
from pydantic import BaseModel
from tabulate import tabulate
from kubernetes import client
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # This action takes no inputs.
    pass
def k8s_get_nodes_printer(result):
    """Print the node summary as a github-style table; result[0] holds the tabular rows."""
    if result is None:
        return
    rows = result[0]
    print("\n")
    headers = ['name', 'status', 'age', 'version', 'labels']
    print(tabulate(rows, tablefmt="github", headers=headers))
def k8s_get_nodes(handle) -> Tuple:
    """k8s_get_nodes get nodes

       Lists every node in the cluster and summarizes name, status, age,
       kubelet version and labels.

       :type handle: object
       :param handle: Object returned from the Task validate method

       :rtype: Tuple of (tabular rows for the printer, list of per-node dicts)
       :raises ApiException: when the node listing API call fails.
    """
    coreApiClient = client.CoreV1Api(api_client=handle)
    try:
        resp = coreApiClient.list_node(pretty=True)
    except ApiException as e:
        # Previously a dead string assignment preceded this re-raise; removed.
        raise e
    output = []
    tabular_config_map = []
    now = datetime.datetime.now()
    for node in resp.items:
        # Removed stray debug print of node.metadata.labels.
        labels = ",".join(f"{label}={value}"
                          for label, value in node.metadata.labels.items())
        # NOTE(review): the LAST condition's type is reported as the status;
        # for a healthy node this is typically "Ready" (the condition's
        # True/False value is not inspected) — confirm intended semantics.
        status = ""
        for condition in node.status.conditions:
            status = condition.type
        name = node.metadata.name
        # Age in whole days since creation; the creation timestamp is made
        # naive to match datetime.now().
        age = (now - node.metadata.creation_timestamp.replace(tzinfo=None)).days
        version = node.status.node_info.kubelet_version
        tabular_config_map.append([name, status, age, version, labels])
        output.append({
            "name": name,
            "status": status,
            "age": f"{age}d",
            "version": version, "labels": labels})
    return (tabular_config_map, output)
================================================
FILE: Kubernetes/legos/k8s_get_nodes_pressure/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_nodes_pressure/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_nodes_pressure/k8s_get_nodes_pressure.json
================================================
{
"action_title": "Get K8s nodes disk and memory pressure",
"action_description": "This action fetches the memory and disk pressure status of each node in the cluster",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_nodes_pressure",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": ["CATEGORY_TYPE_INFORMATION" , "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_NODE" ]
}
================================================
FILE: Kubernetes/legos/k8s_get_nodes_pressure/k8s_get_nodes_pressure.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from typing import Optional, Tuple
from pydantic import BaseModel
from tabulate import tabulate
import json
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # This action takes no inputs.
    pass
def k8s_get_nodes_pressure_printer(output):
    """Render node pressure findings as a table, or an all-clear message."""
    if output is None:
        return
    ok, findings = output
    if ok:
        print("No nodes have memory or disk pressure issues.")
        return
    rows = [[f['node'], f['type'], f['status']] for f in findings]
    print(tabulate(rows, headers=['Node', 'Type', 'Status'], tablefmt='pretty'))
def k8s_get_nodes_pressure(handle) -> Tuple:
    """
    k8s_get_nodes_pressure fetches the memory and disk pressure status of each node in the cluster

    :type handle: object
    :param handle: Object returned from the Task validate method

    :rtype: Tuple: (True, None) when no node reports pressure, else
            (False, list of {'node', 'type', 'status'} findings).
    :raises ApiException: when the handle is invalid or kubectl reports an error.
    """
    # Raise instead of returning a bare string: the declared return type is a
    # Tuple and the printer unpacks it, so the old `return "Invalid Handle"`
    # crashed downstream.
    if handle.client_side_validation is not True:
        raise ApiException(f"K8S Connector is invalid: {handle}")
    # Getting nodes details in json format
    cmd = "kubectl get nodes -o json"
    result = handle.run_native_cmd(cmd)
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {cmd} {result.stderr}")
    nodes = json.loads(result.stdout)['items']
    pressure_nodes = []
    for node in nodes:
        name = node['metadata']['name']
        conditions = node['status']['conditions']
        memory_pressure = next((item for item in conditions if item["type"] == "MemoryPressure"), None)
        disk_pressure = next((item for item in conditions if item["type"] == "DiskPressure"), None)
        # A healthy node reports "False" for both pressure conditions; anything
        # else ("True" or "Unknown") is a finding. Report the ACTUAL status —
        # the previous code hard-coded "False" here, mislabeling every finding.
        if memory_pressure and memory_pressure['status'] != "False":
            pressure_nodes.append({"node": name, "type": "MemoryPressure", "status": memory_pressure['status']})
        if disk_pressure and disk_pressure['status'] != "False":
            pressure_nodes.append({"node": name, "type": "DiskPressure", "status": disk_pressure['status']})
    if len(pressure_nodes) != 0:
        return (False, pressure_nodes)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_nodes_with_insufficient_resources/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_nodes_with_insufficient_resources/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_nodes_with_insufficient_resources/k8s_get_nodes_with_insufficient_resources.json
================================================
{
"action_title": "Get Kubernetes Nodes that have insufficient resources",
"action_description": "Get Kubernetes Nodes that have insufficient resources",
"action_type": "LEGO_TYPE_K8S",
"action_is_check": true,
"action_entry_function": "k8s_get_nodes_with_insufficient_resources",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S"]
}
================================================
FILE: Kubernetes/legos/k8s_get_nodes_with_insufficient_resources/k8s_get_nodes_with_insufficient_resources.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
import pprint
from typing import Tuple
from pydantic import BaseModel, Field
from tabulate import tabulate
from kubernetes import client
from kubernetes.client.rest import ApiException
try:
from unskript.legos.kubernetes.k8s_utils import normalize_cpu, normalize_memory, normalize_storage
except Exception:
pass
class InputSchema(BaseModel):
    """Input parameters for the insufficient-resources node check."""
    # Usage percentage above which a node is flagged.
    threshold: int = Field(
        default=85,
        description='Threshold in %age. Default is 85%',
        title='Threshold',
    )
def k8s_get_nodes_with_insufficient_resources_printer(output):
    """Render the check result as a fancy-grid table; no-op when output is None."""
    if output is None:
        return
    headers = ["Name", "Resource"]
    rows = []
    for entry in output[1]:
        # Only dict entries carry node details; anything else is skipped.
        if isinstance(entry, dict):
            headers = ["Name", "Allocatable", "Capacity"]
            rows.append([
                entry.get('name'),
                pprint.pformat(entry.get('allocatable')),
                pprint.pformat(entry.get('capacity')),
            ])
    print(tabulate(rows, headers=headers, tablefmt='fancy_grid'))
def k8s_get_nodes_with_insufficient_resources(handle, threshold: int = 85) -> Tuple:
    """k8s_get_nodes_with_insufficient_resources returns the list of nodes that have insufficient resources

       :type handle: Object
       :param handle: Object returned from task.validate(...) function

       :type threshold: int
       :param threshold: Threshold in Percentage. Default value being 85.
                         Any node resource exceeding that threshold
                         is flagged as having insufficient resource.

       :rtype: Tuple of the result
    """
    if handle.client_side_validation is not True:
        raise ApiException(f"K8S Connector is invalid {handle}")

    core_v1 = client.CoreV1Api(api_client=handle)
    flagged_nodes = []

    for node in core_v1.list_node().items:
        alloc = node.status.allocatable
        cap = node.status.capacity

        # Pair each resource key with its normalizer so cpu, memory and
        # ephemeral-storage are all handled by the same loop.
        usage_percentages = []
        for key, normalize in (('cpu', normalize_cpu),
                               ('memory', normalize_memory),
                               ('ephemeral-storage', normalize_storage)):
            allocatable = normalize(alloc.get(key))
            capacity = normalize(cap.get(key))
            # Usage is the share of capacity that is no longer allocatable.
            usage_percentages.append((capacity - allocatable) / capacity * 100)

        if any(pct >= threshold for pct in usage_percentages):
            flagged_nodes.append({
                'name': node.metadata.name,
                'allocatable': alloc,
                'capacity': cap,
            })

    if flagged_nodes:
        return (False, flagged_nodes)
    return (True, [])
================================================
FILE: Kubernetes/legos/k8s_get_offline_nodes/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_offline_nodes/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_offline_nodes/k8s_get_offline_nodes.json
================================================
{
"action_title": "Get K8s offline nodes",
"action_description": "This action checks if any node in the Kubernetes cluster is offline.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_offline_nodes",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_NODE" ],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_offline_nodes/k8s_get_offline_nodes.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
import json
from typing import Tuple
from pydantic import BaseModel
class InputSchema(BaseModel):
    """This action takes no input parameters."""
    pass
def k8s_get_offline_nodes_printer(output):
    """Print the raw check result; silently skip a missing output."""
    if output is not None:
        print(output)
def k8s_get_offline_nodes(handle) -> Tuple:
    """
    k8s_get_offline_nodes checks if any node in the Kubernetes cluster is offline.

    A node is treated as offline when its "Ready" condition is anything other
    than "True". This includes status "Unknown", which Kubernetes reports when
    the kubelet has stopped posting status (node unreachable) -- previously
    only an explicit "False" was detected, so unreachable nodes were missed.

    :type handle: object
    :param handle: Object returned from the Task validate method

    :rtype: tuple
    :return: Status, List of offline nodes
    """
    if handle.client_side_validation is not True:
        print(f"K8S Connector is invalid: {handle}")
        return (False, ["Invalid Handle"])

    # Getting nodes details in json format
    cmd = "kubectl get nodes -o json"
    result = handle.run_native_cmd(cmd)
    if result.stderr:
        raise Exception(f"Error occurred while executing command {cmd} {result.stderr}")
    nodes = json.loads(result.stdout)['items']

    offline_nodes = []
    for node in nodes:
        name = node['metadata']['name']
        conditions = node['status']['conditions']
        node_ready = next((item for item in conditions if item["type"] == "Ready"), None)
        # Flag any Ready status that is not "True" ("False" or "Unknown").
        if node_ready and node_ready['status'] != "True":
            offline_nodes.append(name)

    if len(offline_nodes) != 0:
        return (False, offline_nodes)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_oomkilled_pods/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_oomkilled_pods/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_oomkilled_pods/k8s_get_oomkilled_pods.json
================================================
{
"action_title": "Get K8S OOMKilled Pods",
"action_description": "Get K8S Pods which are OOMKilled from the container last states.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_oomkilled_pods",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_TROUBLESHOOTING","CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD" ],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_oomkilled_pods/k8s_get_oomkilled_pods.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
import pprint
import datetime
from datetime import timezone
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Inputs for the OOMKilled-pods check."""
    # Namespace to scan; empty string means all namespaces.
    namespace: Optional[str] = Field(
        '',
        description='Kubernetes Namespace Where the Service exists',
        title='K8S Namespace',
    )
    # Look-back window (hours) for OOMKilled container terminations.
    # Fixed typo in the user-facing description: "good" -> "got".
    time_interval_to_check: int = Field(
        24,
        description='Time interval in hours. This time window is used to check if POD got OOMKilled. Default is 24 hours.',
        title="Time Interval"
    )
def k8s_get_oomkilled_pods_printer(output):
    """Pretty-print the OOMKilled check output; no-op for None."""
    if output is not None:
        pprint.pprint(output)
def format_datetime(dt):
    """Render dt as 'YYYY-MM-DD HH:MM:SS UTC' (the 'UTC' suffix is literal)."""
    return '{:%Y-%m-%d %H:%M:%S} UTC'.format(dt)
def k8s_get_oomkilled_pods(handle, namespace: str = "", time_interval_to_check: int = 24) -> Tuple:
    """k8s_get_oomkilled_pods This function returns the pods that have OOMKilled event in the container last states

       :type handle: Object
       :param handle: Object returned from the task.validate(...) function

       :type namespace: str
       :param namespace: (Optional)String, K8S Namespace as python string

       :type time_interval_to_check: int
       :param time_interval_to_check: (Optional) Integer, in hours, the interval within which the
               state of the POD should be checked.

       :rtype: Status, List of objects of pods, namespaces, and containers that are in OOMKilled state
    """
    result = []
    if handle.client_side_validation is not True:
        raise ApiException(f"K8S Connector is invalid {handle}")

    v1 = client.CoreV1Api(api_client=handle)

    # Check whether a namespace is provided, if not fetch all namespaces
    try:
        if namespace:
            response = v1.list_namespaced_pod(namespace)
        else:
            response = v1.list_pod_for_all_namespaces()
        if response is None or not hasattr(response, 'items'):
            raise ApiException("Unexpected response from the Kubernetes API. 'items' not found in the response.")
        pods = response.items
    except ApiException as e:
        raise e

    # Check if pods is None or not
    if pods is None:
        raise ApiException("No pods returned from the Kubernetes API.")

    # Get Current Time in UTC
    current_time = datetime.datetime.now(timezone.utc)
    # Get time interval to check (or 24 hour) reference and convert to UTC
    interval_time_to_check = current_time - datetime.timedelta(hours=time_interval_to_check)
    interval_time_to_check = interval_time_to_check.replace(tzinfo=timezone.utc)

    for pod in pods:
        pod_name = pod.metadata.name
        namespace = pod.metadata.namespace
        # Ensure container_statuses is not None before iterating
        container_statuses = pod.status.container_statuses
        if container_statuses is None:
            continue
        # Check each pod for OOMKilled state
        for container_status in container_statuses:
            container_name = container_status.name
            last_state = container_status.last_state
            if last_state and last_state.terminated and last_state.terminated.reason == "OOMKilled":
                termination_time = last_state.terminated.finished_at
                # Bug fix: finished_at can be None; the original called
                # .replace() before the None-check and raised AttributeError.
                if termination_time is None:
                    continue
                termination_time = termination_time.replace(tzinfo=timezone.utc)
                # If termination time is greater than interval_time_to_check meaning
                # the POD has gotten OOMKilled in the last 24 hours, so lets flag it!
                if termination_time >= interval_time_to_check:
                    formatted_termination_time = format_datetime(termination_time)
                    formatted_interval_time_to_check = format_datetime(interval_time_to_check)
                    result.append({"pod": pod_name,
                                   "namespace": namespace,
                                   "container": container_name,
                                   "termination_time": formatted_termination_time,
                                   "interval_time_to_check": formatted_interval_time_to_check})
    return (False, result) if result else (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_pending_pods/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_pending_pods/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_pending_pods/k8s_get_pending_pods.json
================================================
{
"action_title": "Get K8s get pending pods",
"action_description": "This action checks if any pod in the Kubernetes cluster is in 'Pending' status.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_pending_pods",
"action_needs_credential": "true",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD" ],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_pending_pods/k8s_get_pending_pods.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from typing import Optional, Tuple
from pydantic import BaseModel, Field
import json
from tabulate import tabulate
from datetime import datetime, timedelta, timezone
class InputSchema(BaseModel):
    """Inputs for the pending-pods check."""
    # Namespace to scan; empty string means all namespaces.
    namespace: Optional[str] = Field(
        default='',
        description='k8s Namespace',
        title='Namespace',
    )
    # Look-back window (hours) for pods stuck in Pending.
    time_interval_to_check: int = Field(
        default=24,
        description='Time interval in hours. This time window is used to check if POD was in Pending state. Default is 24 hours.',
        title="Time Interval",
    )
def k8s_get_pending_pods_printer(output):
    """Print pending pods as a grid table, or a friendly message when none."""
    status, data = output
    if status:
        print("There are no pending pods.")
        return
    print(tabulate(data, headers=["Pod Name", "Namespace"], tablefmt="grid"))
def format_datetime(dt):
    """Format a datetime as 'YYYY-MM-DD HH:MM:SS <tzname>'."""
    return '{:%Y-%m-%d %H:%M:%S %Z}'.format(dt)
def k8s_get_pending_pods(handle, namespace: str = "", time_interval_to_check=24) -> Tuple:
    """
    k8s_get_pending_pods checks if any pod in the Kubernetes cluster is in 'Pending' status within the specified time interval.

    :type handle: object
    :param handle: Object returned from the Task validate method

    :type namespace: string
    :param namespace: Namespace in which to look for the resources. If not provided, all namespaces are considered

    :type time_interval_to_check: int
    :param time_interval_to_check: (Optional) Integer, in hours, the interval within which the
            state of the POD should be checked.

    :rtype: tuple
    :return: Status, list of pending pods with their namespace and the time they became pending
    """
    if handle.client_side_validation is not True:
        print(f"K8S Connector is invalid: {handle}")
        return False, "Invalid Handle"

    namespace_option = f"--namespace={namespace}" if namespace else "--all-namespaces"
    # Getting pods details in json format
    cmd = f"kubectl get pods -o json {namespace_option}"
    result = handle.run_native_cmd(cmd)
    if result.stderr:
        raise Exception(f"Error occurred while executing command {cmd}: {result.stderr}")

    all_pods = json.loads(result.stdout)['items']

    # Anything that entered Pending after this cutoff is reported.
    cutoff = datetime.now(timezone.utc) - timedelta(hours=time_interval_to_check)
    cutoff = cutoff.replace(tzinfo=timezone.utc)

    pending_pods = []
    for pod in all_pods:
        if pod['status']['phase'] != 'Pending':
            continue
        raw_start = pod['status'].get('startTime')
        if not raw_start:
            continue
        started = datetime.strptime(raw_start, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc)
        if started >= cutoff:
            pending_pods.append({
                "pod": pod['metadata']['name'],
                "namespace": pod['metadata']['namespace'],
                "start_time": format_datetime(started),
                "interval_time_to_check": format_datetime(cutoff),
            })

    return (False, pending_pods) if pending_pods else (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_pod_config/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_pod_config/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_pod_config/k8s_get_pod_config.json
================================================
{
"action_title": "Get Kubernetes POD Configuration",
"action_description": "Get Kubernetes POD Configuration",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_pod_config",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ,"CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD"]
}
================================================
FILE: Kubernetes/legos/k8s_get_pod_config/k8s_get_pod_config.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from kubernetes import client
class InputSchema(BaseModel):
    """Inputs identifying the pod whose configuration should be fetched."""
    # Required: namespace containing the pod.
    namespace: str = Field(
        description='Kubernetes namespace',
        title='Namespace',
    )
    # Required: name of the pod.
    pod: str = Field(
        description='Kubernetes Pod Name. eg ngix-server',
        title="Pod",
    )
def k8s_get_pod_config_printer(output):
    """Pretty-print the pod configuration; no-op for None."""
    if output is not None:
        pprint.pprint(output)
def k8s_get_pod_config(handle, namespace: str, pod: str) -> str:
    """k8s_get_pod_config get pod config

    Bug fix: the original called AppsV1Api.list_namespaced_deployment with a
    pod-name field selector, which lists Deployments (and would only match a
    Deployment that happens to share the pod's name) rather than the pod's
    configuration. Query pods through CoreV1Api instead.

    :type handle: object
    :param handle: Object returned from the Task validate method

    :type namespace: str
    :param namespace: Kubernetes namespace.

    :type pod: str
    :param pod: Kubernetes Pod Name.

    :rtype: string
    """
    coreApiClient = client.CoreV1Api(api_client=handle)
    # Select exactly the requested pod by name.
    field_selector = "metadata.name=" + pod
    res = coreApiClient.list_namespaced_pod(
        namespace=namespace, pretty=True, field_selector=field_selector)
    return res
================================================
FILE: Kubernetes/legos/k8s_get_pod_logs/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_pod_logs_and_filter/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_pod_logs_and_filter/k8s_get_pod_logs_and_filter.json
================================================
{
"action_title": "Get Kubernetes Logs for a list of PODs & Filter in a Namespace",
"action_description": "Get Kubernetes Logs for a list of PODs and Filter in a Namespace",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_pod_logs_and_filter",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ,"CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD"]
}
================================================
FILE: Kubernetes/legos/k8s_get_pod_logs_and_filter/k8s_get_pod_logs_and_filter.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import re
import pprint
from typing import List, Dict
from pydantic import BaseModel, Field
from kubernetes import client
class InputSchema(BaseModel):
    """Inputs for fetching and grepping pod logs."""
    # Namespace containing the pods.
    namespace: str = Field(
        description='k8s namespace',
        title='Namespace',
    )
    # Pod names whose logs should be searched.
    pods: list = Field(
        description='Name of pods',
        title='Pods',
    )
    # Pattern to look for in each pod's log.
    matchstr: str = Field(
        description='String to Match in the Logs',
        title='Match String',
    )
def k8s_get_pod_logs_and_filter_printer(output):
    """Pretty-print the per-pod match results; no-op for None."""
    if output is not None:
        pprint.pprint(output)
def k8s_get_pod_logs_and_filter(handle, namespace: str, pods: List, matchstr: str) -> Dict:
    """k8s_get_pod_logs_and_filter get pod logs

    :type handle: object
    :param handle: Object returned from the Task validate method

    :type namespace: str
    :param namespace: k8s namespace.

    :type pods: List
    :param pods: Name of pods.

    :type matchstr: str
    :param matchstr: String to Match in the Logs. NOTE: this is interpreted as
        a regular expression, not escaped -- regex metacharacters in the input
        will be treated as such.

    :rtype: Dict of pod name -> first regex match found in that pod's log
    """
    coreApiClient = client.CoreV1Api(api_client=handle)
    result = {}
    for pod in pods:
        # Bug fix: the original wrapped the whole loop in one bare
        # `except Exception` that aborted on the first failing pod and
        # discarded the error detail. Handle each pod separately and report
        # the actual error so the remaining pods are still scanned.
        try:
            resp = coreApiClient.read_namespaced_pod_log(
                namespace=namespace, name=pod, pretty=True, timestamps=True)
        except Exception as e:
            print(f"Unable to Read Logs from Pod {pod}: {e}")
            continue
        res = re.search(f'({matchstr})', resp)
        if res is not None:
            result[pod] = res
    return result
================================================
FILE: Kubernetes/legos/k8s_get_pod_status/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_crashloopbackoff_state/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_crashloopbackoff_state/k8s_get_pods_in_crashloopbackoff_state.json
================================================
{
"action_title": "Get all K8s Pods in CrashLoopBackOff State",
"action_description": "Get all K8s pods in CrashLoopBackOff State",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_pods_in_crashloopbackoff_state",
"action_is_check": true,
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_TROUBLESHOOTING","CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD" ],
"action_next_hop": ["1d3a64b3c396be6d27b260606aa5570f61e79f3b7adcda457e026da657edc079"],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_crashloopbackoff_state/k8s_get_pods_in_crashloopbackoff_state.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
from tabulate import tabulate
import datetime
from datetime import timezone
class InputSchema(BaseModel):
    """Inputs for the CrashLoopBackOff check."""
    # Namespace to scan; empty string means all namespaces.
    namespace: Optional[str] = Field(
        '',
        description='k8s Namespace',
        title='Namespace',
    )
    # Look-back window (hours) for crash-loop transitions.
    time_interval_to_check: int = Field(
        24,
        description='Time interval in hours. This time window is used to check if POD was in Crashloopback. Default is 24 hours.',
        title="Time Interval",
    )
def k8s_get_pods_in_crashloopbackoff_state_printer(output):
    """Print crash-looping pods as a grid table, or a message when none."""
    status, data = output
    if status:
        print("No pods are in CrashLoopBackOff state.")
        return
    rows = [(item["pod"], item["namespace"], item["container"]) for item in data]
    print(tabulate(rows, headers=["Pod Name", "Namespace", "Container Name"], tablefmt="grid"))
def format_datetime(dt):
    """Render dt as 'YYYY-MM-DD HH:MM:SS UTC' (the 'UTC' suffix is literal)."""
    return '{:%Y-%m-%d %H:%M:%S} UTC'.format(dt)
def k8s_get_pods_in_crashloopbackoff_state(handle, namespace: str = '', time_interval_to_check=24) -> Tuple:
    """
    k8s_get_pods_in_crashloopbackoff_state returns the pods that have CrashLoopBackOff state in their container statuses within the specified time interval.

    :type handle: Object
    :param handle: Object returned from the task.validate(...) function

    :type namespace: str
    :param namespace: (Optional) String, K8S Namespace as python string

    :type time_interval_to_check: int
    :param time_interval_to_check: (Optional) Integer, in hours, the interval within which the
            state of the POD should be checked.

    :rtype: Status, List of objects of pods, namespaces, and containers that are in CrashLoopBackOff state
    """
    if handle.client_side_validation is not True:
        raise ApiException(f"K8S Connector is invalid {handle}")

    v1 = client.CoreV1Api(api_client=handle)
    try:
        response = v1.list_namespaced_pod(namespace) if namespace else v1.list_pod_for_all_namespaces()
        if response is None or not hasattr(response, 'items'):
            raise ApiException("Unexpected response from the Kubernetes API. 'items' not found in the response.")
        pods = response.items
    except ApiException as e:
        raise e
    if pods is None:
        raise ApiException("No pods returned from the Kubernetes API.")

    # Only crash-loop transitions after this cutoff are reported.
    cutoff = datetime.datetime.now(timezone.utc) - datetime.timedelta(hours=time_interval_to_check)
    cutoff = cutoff.replace(tzinfo=timezone.utc)

    crashlooping = []
    for pod in pods:
        for cs in (pod.status.container_statuses or []):
            waiting = cs.state.waiting if cs.state else None
            if not (waiting and waiting.reason == "CrashLoopBackOff"):
                continue
            # Use the last terminated state's finish time as the moment of the
            # most recent crash, and compare it against the cutoff.
            terminated = cs.last_state.terminated if cs.last_state else None
            if terminated is None or terminated.finished_at is None:
                continue
            finished = terminated.finished_at.replace(tzinfo=timezone.utc)
            if finished >= cutoff:
                crashlooping.append({
                    "pod": pod.metadata.name,
                    "namespace": pod.metadata.namespace,
                    "container": cs.name,
                    "last_transition_time": format_datetime(finished),
                    "interval_time_to_check": format_datetime(cutoff),
                })
    return (False, crashlooping) if crashlooping else (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_imagepullbackoff_state/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_imagepullbackoff_state/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_imagepullbackoff_state/k8s_get_pods_in_imagepullbackoff_state.json
================================================
{
"action_title": "Get all K8s Pods in ImagePullBackOff State",
"action_description": "Get all K8s pods in ImagePullBackOff State",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_pods_in_imagepullbackoff_state",
"action_is_check": true,
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_TROUBLESHOOTING","CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD" ],
"action_next_hop": ["a53b5860500e142aa387ce55d5e85f139596c521dfb5c920cc2bc47c38fc0b11"],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_imagepullbackoff_state/k8s_get_pods_in_imagepullbackoff_state.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
from tabulate import tabulate
class InputSchema(BaseModel):
    """Inputs for the ImagePullBackOff check."""
    # Namespace to scan; empty string means all namespaces.
    namespace: Optional[str] = Field(
        '',
        description='k8s Namespace',
        title='Namespace',
    )
def k8s_get_pods_in_imagepullbackoff_state_printer(output):
    """Print image-pull-failing pods as a grid table, or a message when none."""
    status, data = output
    if status:
        print("No pods are in ImagePullBackOff or ErrImagePull state.")
        return
    rows = [(item["pod"], item["namespace"], item["container"]) for item in data]
    print(tabulate(rows, headers=["Pod Name", "Namespace", "Container Name"], tablefmt="grid"))
def k8s_get_pods_in_imagepullbackoff_state(handle, namespace: str = '') -> Tuple:
    """
    k8s_get_pods_in_imagepullbackoff_state returns the pods that have ImagePullBackOff or ErrImagePull state in their container statuses.

    :type handle: Object
    :param handle: Object returned from the task.validate(...) function

    :type namespace: str
    :param namespace: (Optional) String, K8S Namespace as python string

    :rtype: Status, List of objects of pods, namespaces, and containers in ImagePullBackOff or ErrImagePull state
    """
    if handle.client_side_validation is not True:
        raise ApiException(f"K8S Connector is invalid {handle}")

    v1 = client.CoreV1Api(api_client=handle)
    try:
        if namespace:
            pods = v1.list_namespaced_pod(namespace).items
            # Nothing to inspect in an empty namespace.
            if not pods:
                return (True, None)
        else:
            pods = v1.list_pod_for_all_namespaces().items
    except ApiException as e:
        raise e

    pull_failure_reasons = ("ImagePullBackOff", "ErrImagePull")
    offenders = []
    for pod in pods:
        for cs in (pod.status.container_statuses or []):
            waiting = cs.state.waiting if cs.state else None
            if waiting and waiting.reason in pull_failure_reasons:
                offenders.append({"pod": pod.metadata.name,
                                  "namespace": pod.metadata.namespace,
                                  "container": cs.name})
    return (False, offenders) if offenders else (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_not_running_state/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_not_running_state/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_not_running_state/k8s_get_pods_in_not_running_state.json
================================================
{
"action_title": "Get Kubernetes PODs in not Running State",
"action_description": "Get Kubernetes PODs in not Running State",
"action_type": "LEGO_TYPE_K8S",
"action_is_check": true,
"action_entry_function": "k8s_get_pods_in_not_running_state",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S"]
}
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_not_running_state/k8s_get_pods_in_not_running_state.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from typing import Tuple
from pydantic import BaseModel, Field
import json
class InputSchema(BaseModel):
    """Inputs for the not-running pods check."""
    # Namespace to scan; empty string means all namespaces.
    namespace: str = Field(default='', description='K8S Namespace', title='K8S Namespace')
def k8s_get_pods_in_not_running_state_printer(output):
    """Print the raw tuple returned by the check; skip when None."""
    if output is not None:
        print(output)
def k8s_get_pods_in_not_running_state(handle, namespace: str = '') -> Tuple:
    """k8s_get_pods_in_not_running_state this check function checks for pods not in "Running" state and status.phase is not "Succeeded"
       and returns the output of list of pods. It does not consider "Completed" status as an errored state.

       :type handle: Object
       :param handle: Object returned from the task.validate(...) function

       :type namespace: str
       :param namespace: (Optional) K8S Namespace; all namespaces when empty

       :rtype: Tuple Result in tuple format.
    """
    if handle.client_side_validation is not True:
        raise Exception(f"K8S Connector is invalid {handle}")

    # The field selector filters out pods whose phase is Running or Succeeded,
    # so only problem pods come back from the API server.
    ns_arg = f"-n {namespace}" if namespace else "--all-namespaces"
    kubectl_command = (
        "kubectl get pods "
        f"{ns_arg} "
        "--field-selector=status.phase!=Running,status.phase!=Succeeded "
        "-o json"
    )
    result = handle.run_native_cmd(kubectl_command)
    if result.stderr:
        raise Exception(f"Error occurred while executing command {kubectl_command}: {result.stderr}")

    if result.stdout:
        items = json.loads(result.stdout).get("items", [])
        if items:
            failed_pods = [
                {
                    'name': item['metadata']['name'],
                    'namespace': item['metadata']['namespace'],
                    'status': item['status']['phase'],
                }
                for item in items
            ]
            return (False, failed_pods)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_terminating_state/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_terminating_state/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_terminating_state/k8s_get_pods_in_terminating_state.json
================================================
{
"action_title": "Get all K8s Pods in Terminating State",
"action_description": "Get all K8s pods in Terminating State",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_pods_in_terminating_state",
"action_is_check": true,
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_POD" ],
"action_next_hop": ["7108717393788c2d76687490938faffe5e6e2a46f05405f180e089a166761173"],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Kubernetes/legos/k8s_get_pods_in_terminating_state/k8s_get_pods_in_terminating_state.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # An empty string selects pods from every namespace.
    namespace: Optional[str] = Field(
        default='', description='k8s Namespace', title='Namespace'
    )
def k8s_get_pods_in_terminating_state_printer(output):
    """Pretty-print the check result; a None output prints nothing."""
    if output is not None:
        pprint.pprint(output)
def k8s_get_pods_in_terminating_state(handle, namespace: str = '') -> Tuple:
    """
    Return all pods that are stuck in the Terminating state.

    A pod is treated as "Terminating" when its metadata carries a
    deletion_timestamp (deletion was requested) but the pod object
    still exists in the API server.

    :type handle: Object
    :param handle: Object returned from the task.validate(...) function
    :type namespace: str
    :param namespace: (Optional) K8S namespace; empty string means all namespaces
    :rtype: Tuple -- (True, None) when no pod is terminating, otherwise
        (False, [{"pod": name, "namespace": ns}, ...])
    """
    if handle.client_side_validation is not True:
        raise ApiException(f"K8S Connector is invalid {handle}")
    v1 = client.CoreV1Api(api_client=handle)
    # Fetch pods from the requested namespace, or from every namespace.
    # NOTE: the original wrapped these calls in `except ApiException: raise e`,
    # which is a no-op re-raise; any ApiException still propagates unchanged.
    if namespace:
        pods = v1.list_namespaced_pod(namespace).items
    else:
        pods = v1.list_pod_for_all_namespaces().items
    # deletion_timestamp is set as soon as deletion has been requested.
    result = [
        {"pod": pod.metadata.name, "namespace": pod.metadata.namespace}
        for pod in pods
        if pod.metadata.deletion_timestamp is not None
    ]
    return (False, result) if result else (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_pods_with_high_restart/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_pods_with_high_restart/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_pods_with_high_restart/k8s_get_pods_with_high_restart.json
================================================
{
"action_title": "Get Kubernetes PODS with high restart",
"action_description": "Get Kubernetes PODS with high restart",
"action_type": "LEGO_TYPE_K8S",
"action_is_check": true,
"action_entry_function": "k8s_get_pods_with_high_restart",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S"]
}
================================================
FILE: Kubernetes/legos/k8s_get_pods_with_high_restart/k8s_get_pods_with_high_restart.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
import datetime
from datetime import timezone
from typing import Tuple
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
# Constants used in this file
INTERVAL_TO_CHECK = 24 # In hours
class InputSchema(BaseModel):
    # Empty string selects every namespace.
    namespace: str = Field(default='', title='K8S Namespace', description='K8S Namespace')
    # Containers restarted more than this many times are candidates.
    threshold: int = Field(default=25, title='Restart Threshold', description='Restart Threshold Value')
def k8s_get_pods_with_high_restart_printer(output):
    """Print the check result; a None output prints nothing."""
    if output is not None:
        print(output)
def format_datetime(dt):
    """Render a datetime as 'YYYY-MM-DD HH:MM:SS UTC'."""
    return f"{dt:%Y-%m-%d %H:%M:%S} UTC"
def k8s_get_pods_with_high_restart(handle, namespace: str = '', threshold: int = 25) -> Tuple:
    """k8s_get_pods_with_high_restart finds pods whose containers restarted
    more than `threshold` times AND whose last restart happened within the
    past INTERVAL_TO_CHECK hours.

    :type handle: Object
    :param handle: Object returned from the task.validate(...) function
    :type namespace: str
    :param namespace: K8S Namespace; empty string means all namespaces
    :type threshold: int
    :param threshold: Restart count above which a container is flagged
    :rtype: Tuple -- (True, None) when nothing is flagged, otherwise
        (False, [{"pod", "namespace", "termination_time",
        "interval_time_to_check"}, ...])
    """
    if handle.client_side_validation is not True:
        raise Exception(f"K8S Connector is invalid {handle}")
    v1 = client.CoreV1Api(api_client=handle)
    try:
        pods = v1.list_namespaced_pod(namespace).items if namespace else v1.list_pod_for_all_namespaces().items
        if not pods:
            return (True, None)  # No pods in the namespace
    except ApiException as e:
        raise Exception(f"Error occurred while accessing Kubernetes API: {e}")
    retval = []
    # A high restart count alone is not enough: the pod may have been stable
    # for a long time since its last crash. Only flag containers whose last
    # termination happened within the past INTERVAL_TO_CHECK hours.
    current_time = datetime.datetime.now(timezone.utc)
    # Already timezone-aware; no further .replace(tzinfo=...) is needed.
    interval_time_to_check = current_time - datetime.timedelta(hours=INTERVAL_TO_CHECK)
    for pod in pods:
        for container_status in pod.status.container_statuses or []:
            if container_status.restart_count <= threshold:
                continue
            last_state = container_status.last_state
            if not last_state or not last_state.terminated:
                continue
            termination_time = last_state.terminated.finished_at
            # BUG FIX: the original called .replace() on termination_time
            # BEFORE checking it for None, crashing on containers whose
            # terminated state carries no finished_at timestamp.
            if termination_time is None:
                continue
            termination_time = termination_time.replace(tzinfo=timezone.utc)
            if termination_time >= interval_time_to_check:
                retval.append({
                    "pod": pod.metadata.name,
                    "namespace": pod.metadata.namespace,
                    "termination_time": format_datetime(termination_time),
                    "interval_time_to_check": format_datetime(interval_time_to_check)
                })
    return (False, retval) if retval else (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_service_images/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_service_images/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_service_images/k8s_get_service_images.json
================================================
{
"action_title": "Get images of K8s services",
"action_description": "Collect images of running services in the provided namespace.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_service_images",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_categories": ["CATEGORY_TYPE_INFORMATION" , "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ,"CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_SERVICE"]
}
================================================
FILE: Kubernetes/legos/k8s_get_service_images/k8s_get_service_images.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from typing import Optional, Dict
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
from tabulate import tabulate
import json
class InputSchema(BaseModel):
    # Empty string means: collect images from services in every namespace.
    namespace: Optional[str] = Field(default='', title='K8S Namespace', description='K8S Namespace')
def k8s_get_service_images_printer(output):
    """Render the service-to-images mapping as a grid table."""
    if len(output) == 0:
        print("No data available")
        return
    table_data = []
    for service, images in output.items():
        image_list = list(images)
        # Multiple images become one newline-separated cell.
        cell = "\n".join(image_list) if image_list else "No images found"
        table_data.append([service, cell])
    print(tabulate(table_data, headers=["Service (Namespace)", "Images"], tablefmt='grid'))
def _run_kubectl_or_raise(handle, command):
    """Run a kubectl command through the connector; raise ApiException on an
    empty response or anything written to stderr."""
    response = handle.run_native_cmd(command)
    if not response or response.stderr:
        raise ApiException(f"Error while executing command ({command}): {response.stderr if response else 'empty response'}")
    return response

def k8s_get_service_images(handle, namespace: str = "") -> Dict:
    """
    k8s_get_service_images collects the images of running services in the
    provided namespace.

    :type handle: Object
    :param handle: Object returned from the task.validate(...) function
    :type namespace: str, optional
    :param namespace: The namespace in which the services reside. If not
        provided, images from all namespaces are fetched.
    :return: Dictionary mapping "service (namespace)" to a list of image names.
    """
    if not namespace:
        get_namespaces_command = "kubectl get ns -o=jsonpath='{.items[*].metadata.name}'"
        namespaces = _run_kubectl_or_raise(handle, get_namespaces_command).stdout.strip().split()
    else:
        namespaces = [namespace]
    service_images = {}
    for ns in namespaces:
        # Names of all services in the namespace.
        get_services_command = f"kubectl get svc -n {ns} -o=jsonpath='{{.items[*].metadata.name}}'"
        service_names = _run_kubectl_or_raise(handle, get_services_command).stdout.strip().split()
        for service_name in service_names:
            # Labels of the service, used to locate its pods.
            get_service_labels_command = f"kubectl get service {service_name} -n {ns} -o=jsonpath='{{.spec.selector}}'"
            # BUG FIX: the original dereferenced response.stdout without first
            # checking for a None response or stderr, unlike the other commands.
            response = _run_kubectl_or_raise(handle, get_service_labels_command)
            if not response.stdout.strip():
                print(f"No labels found for service {service_name} in namespace {ns}. Skipping...")
                continue
            labels_dict = json.loads(response.stdout.replace("'", "\""))
            label_selector = ",".join([f"{k}={v}" for k, v in labels_dict.items()])
            # Images from the pods matched by the service's selector.
            get_images_command = f"kubectl get pods -n {ns} -l {label_selector} -o=jsonpath='{{.items[*].spec.containers[*].image}}'"
            response = handle.run_native_cmd(get_images_command)
            service_key = f"{service_name} ({ns})"
            if response and not response.stderr:
                # Deduplicate images and replace 'docker.io' with 'docker_io'.
                images = list(set(response.stdout.strip().split()))
                service_images[service_key] = [image.replace('docker.io', 'docker_io') for image in images]
            else:
                # Best-effort: record the service even when pod lookup failed.
                service_images[service_key] = []
    return service_images
================================================
FILE: Kubernetes/legos/k8s_get_service_with_no_associated_endpoints/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_service_with_no_associated_endpoints/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_service_with_no_associated_endpoints/k8s_get_service_with_no_associated_endpoints.json
================================================
{
"action_title": "Get K8S Service with no associated endpoints",
"action_description": "Get K8S Service with no associated endpoints",
"action_type": "LEGO_TYPE_K8S",
"action_is_check": true,
"action_entry_function": "k8s_get_service_with_no_associated_endpoints",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S"]
}
================================================
FILE: Kubernetes/legos/k8s_get_service_with_no_associated_endpoints/k8s_get_service_with_no_associated_endpoints.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from tabulate import tabulate
from kubernetes import client
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # Namespace in which the whitelisted services are expected to exist.
    namespace: str = Field(description="Kubernetes Namespace Where the Service exists", title="K8S Namespace")
    # Names of the services whose endpoints should be verified.
    core_services: list = Field(description="List of services", title="Names of whitelisted services")
def k8s_get_service_with_no_associated_endpoints_printer(output):
    """Print a summary table of the services that lack endpoints."""
    status, data = output
    if status:
        print("No services with missing endpoints found !")
        return
    rows = [(entry["namespace"], entry["name"]) for entry in data]
    print(tabulate(rows, headers=["Namespace", "Service Name"], tablefmt="grid"))
def k8s_get_service_with_no_associated_endpoints(handle, namespace: str , core_services:list) -> Tuple:
    """k8s_get_service_with_no_associated_endpoints returns the whitelisted
    services that do not have any associated endpoints.

    :type handle: Object
    :param handle: Object returned from the task.validate(...) function
    :type namespace: str
    :param namespace: String, K8S Namespace as python string
    :type core_services: list
    :param core_services: Names of the services to check
    :rtype: Tuple -- (True, None) when every service has endpoints, otherwise
        (False, [{"name": ..., "namespace": ...}, ...])
    """
    if handle.client_side_validation is not True:
        raise ApiException(f"K8S Connector is invalid {handle}")
    v1 = client.CoreV1Api(api_client=handle)
    retval = []
    for service_name in core_services:
        try:
            service = v1.read_namespaced_service(name=service_name, namespace=namespace)
            ep = v1.read_namespaced_endpoints(name=service_name, namespace=namespace)
            # An Endpoints object with no subsets means nothing backs the service.
            if not ep.subsets:
                retval.append({"name": service.metadata.name, "namespace": service.metadata.namespace})
        except ApiException as e:
            # A missing service is reported and skipped; anything else propagates.
            if e.status != 404:
                raise
            print(f"Service {service_name} not found in namespace {namespace}.")
    if retval:
        return (False, retval)
    return (True, None)
================================================
FILE: Kubernetes/legos/k8s_get_services/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_services/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_services/k8s_get_services.json
================================================
{
"action_title": "Get Kubernetes Services for a given Namespace",
"action_description": "Get Kubernetes Services for a given Namespace",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_services",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S"]
}
================================================
FILE: Kubernetes/legos/k8s_get_services/k8s_get_services.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # Namespace whose services will be listed.
    namespace: str = Field(description='Kubernetes namespace', title='Namespace')
def k8s_get_services_printer(output):
    """Print the service listing; a None output prints nothing."""
    if output is not None:
        print(output)
def k8s_get_services(handle, namespace: str) -> str:
    """k8s_get_services lists the services in a namespace.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type namespace: str
    :param namespace: Kubernetes namespace.
    :rtype: V1ServiceList on success, or an error-description string when the
        API call raises ApiException.
    """
    core_api_client = client.CoreV1Api(api_client=handle)
    try:
        resp = core_api_client.list_namespaced_service(namespace)
    except ApiException as e:
        # BUG FIX: the original message contained the typo "occured" and had
        # no separator before e.reason (it rendered as "...the commandNotFound").
        resp = f'An Exception occurred while executing the command: {e.reason}'
    return resp
================================================
FILE: Kubernetes/legos/k8s_get_unbound_pvcs/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_unbound_pvcs/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_unbound_pvcs/k8s_get_unbound_pvcs.json
================================================
{
"action_title": "Get Kubernetes Unbound PVCs",
"action_description": "Get Kubernetes Unbound PVCs",
"action_type": "LEGO_TYPE_K8S",
"action_is_check": true,
"action_entry_function": "k8s_get_unbound_pvcs",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S"]
}
================================================
FILE: Kubernetes/legos/k8s_get_unbound_pvcs/k8s_get_unbound_pvcs.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from typing import Tuple
from pydantic import BaseModel, Field
from kubernetes import client
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # Empty string selects every namespace.
    namespace: str = Field(default='', title="K8S Namespace", description='K8S Namespace')
def k8s_get_unbound_pvcs_printer(output):
    """Print the unbound-PVC check result; a None output prints nothing."""
    if output is not None:
        print(output)
def k8s_get_unbound_pvcs(handle, namespace:str = '') -> Tuple:
    """k8s_get_unbound_pvcs finds all PVCs that no pod mounts and returns them.

    :type handle: Object
    :param handle: Object returned from the task.validate(...) function
    :type namespace: str
    :param namespace: Kubernetes Namespace; empty string means all namespaces
    :rtype: Tuple -- (True, []) when every PVC is mounted, otherwise
        (False, [{'name': ..., 'namespace': ...}, ...])
    """
    if handle.client_side_validation is not True:
        raise ApiException(f"K8S Connector is invalid {handle}")
    v1 = client.CoreV1Api(api_client=handle)
    # Get all PVCs and pods, cluster-wide or per-namespace.
    if not namespace:
        pvc_list = v1.list_persistent_volume_claim_for_all_namespaces().items
        pod_list = v1.list_pod_for_all_namespaces().items
    else:
        pvc_list = v1.list_namespaced_persistent_volume_claim(namespace).items
        pod_list = v1.list_namespaced_pod(namespace).items
    # BUG FIX: the original matched mounted vs. existing PVCs by name only,
    # so a PVC sharing its name with a mounted PVC in another namespace was
    # never reported; it also gated the diff on a length comparison that can
    # be equal while the sets differ (e.g. one PVC mounted by two pods).
    # Compare (name, namespace) pairs instead.
    all_pvcs = {(pvc.metadata.name, pvc.metadata.namespace) for pvc in pvc_list}
    mounted_pvcs = set()
    for pod in pod_list:
        # pod.spec.volumes can be None for pods with no volumes.
        for volume in pod.spec.volumes or []:
            if volume.persistent_volume_claim is not None:
                mounted_pvcs.add((volume.persistent_volume_claim.claim_name, pod.metadata.namespace))
    retval = [{'name': name, 'namespace': ns} for name, ns in sorted(all_pvcs - mounted_pvcs)]
    if retval:
        return (False, retval)
    return (True, [])
================================================
FILE: Kubernetes/legos/k8s_get_versioning_info/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_get_versioning_info/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_get_versioning_info/k8s_get_versioning_info.json
================================================
{
"action_title": "Get versioning info",
"action_description": "This action gets the kubectl, Kubernetes cluster, and Docker version if available.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_get_versioning_info",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": ["CATEGORY_TYPE_INFORMATION" , "CATEGORY_TYPE_K8S", "CATEGORY_TYPE_DEVOPS"]
}
================================================
FILE: Kubernetes/legos/k8s_get_versioning_info/k8s_get_versioning_info.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
import subprocess
from pydantic import BaseModel
class InputSchema(BaseModel):
    # This action takes no user inputs; it only probes local binaries.
    pass
def k8s_get_versioning_info_printer(output):
    """Print each discovered component version, one per line."""
    print("Versions:")
    for key in output:
        print(f"{key}: {output[key]}")
def _probe_version(command):
    """Run one version command; return its stripped stdout on success, the
    string "Not found" when the binary is missing, or None on a nonzero exit."""
    try:
        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FileNotFoundError:
        return "Not found"
    if result.returncode == 0:
        return result.stdout.decode('utf-8').strip()
    # Preserve original behavior: a nonzero exit records nothing.
    return None

def k8s_get_versioning_info(handle):
    """
    k8s_get_versioning_info returns the kubectl, Kubernetes cluster, and Docker version if available.
    :type handle: Object
    :param handle: Object returned from the task.validate(...) function
    :rtype: Dict of version results keyed by component name.
    """
    # The three copy-pasted try/except blocks of the original collapsed into
    # one data-driven loop; the probed commands are unchanged.
    commands = {
        'kubectl': ["kubectl", "version", "--client", "--short"],
        'kubernetes': ["kubectl", "version", "--short"],
        'docker': ["docker", "version", "--format", "'{{.Server.Version}}'"],
    }
    versions = {}
    for component, command in commands.items():
        value = _probe_version(command)
        if value is not None:
            versions[component] = value
    return versions
================================================
FILE: Kubernetes/legos/k8s_kubectl_command/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_config_set_context/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_config_set_context/k8s_kubectl_config_set_context.json
================================================
{
"action_title": "Kubectl set context entry in kubeconfig",
"action_description": "Kubectl set context entry in kubeconfig",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_config_set_context",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_config_set_context/k8s_kubectl_config_set_context.py
================================================
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # Command template; {namespace} is filled in from the namespace field.
    k8s_cli_string: str = Field(
        default='kubectl config set-context --current --namespace={namespace}',
        title='Kubectl Command',
        description='kubectl sets a context entry in kubeconfig'
    )
    namespace: str = Field(title='Namespace', description='Namespace')
def k8s_kubectl_config_set_context_printer(data: list):
    """Print the kubectl output; a None result prints nothing."""
    if data is not None:
        print(data)
def k8s_kubectl_config_set_context(handle, k8s_cli_string: str, namespace: str) -> str:
    """k8s_kubectl_config_set_context sets a context entry in kubeconfig.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: Command template; `{namespace}` is substituted
    :type namespace: str
    :param namespace: Namespace to set on the current context
    :rtype: str -- stdout of the command, or "" when no response came back
    """
    command = k8s_cli_string.format(namespace=namespace)
    result = handle.run_native_cmd(command)
    if result is None:
        print(f"Error while executing command ({command}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {command} {result.stderr}")
    return result.stdout
================================================
FILE: Kubernetes/legos/k8s_kubectl_config_view/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_config_view/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_config_view/k8s_kubectl_config_view.json
================================================
{
"action_title": "Kubectl display merged kubeconfig settings",
"action_description": "Kubectl display merged kubeconfig settings",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_config_view",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_config_view/k8s_kubectl_config_view.py
================================================
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    # Command template; {namespace} is filled in from the namespace field.
    k8s_cli_string: str = Field(
        default='kubectl config view -n {namespace}',
        title='Kubectl Command',
        description='kubectl Displays merged kubeconfig settings'
    )
    namespace: str = Field(title='Namespace', description='Namespace')
def k8s_kubectl_config_view_printer(data: str):
    """Print the merged kubeconfig settings, or an error note when absent."""
    if data is None:
        print("Error while executing command")
    else:
        print(data)
def k8s_kubectl_config_view(handle, k8s_cli_string: str, namespace: str) -> str:
    """k8s_kubectl_config_view displays merged kubeconfig settings.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: Command template; `{namespace}` is substituted
    :type namespace: str
    :param namespace: Namespace passed to kubectl
    :rtype: String, output of the command in python string format, or
        empty string in case of error.
    """
    command = k8s_cli_string.format(namespace=namespace)
    result = handle.run_native_cmd(command)
    if result is None:
        print(f"Error while executing command ({command}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {command} {result.stderr}")
    return result.stdout
================================================
FILE: Kubernetes/legos/k8s_kubectl_delete_pod/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_describe_node/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_describe_node/k8s_kubectl_describe_node.json
================================================
{
"action_title": "Kubectl describe a node",
"action_description": "Kubectl describe a node",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_describe_node",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_NODE"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_describe_node/k8s_kubectl_describe_node.py
================================================
from pprint import pprint
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    node_name: str = Field(title='Node Name', description='Node Name')
    # Command template; {node_name} is filled in from the field above.
    k8s_cli_string: str = Field(
        default='kubectl describe node {node_name}',
        title='Kubectl Command',
        description='kubectl describe a node'
    )
def k8s_kubectl_describe_node_printer(data: str):
    """Pretty-print the node description under a header; None prints nothing."""
    if data is None:
        return
    print("Node Details:")
    pprint(data)
def k8s_kubectl_describe_node(handle, node_name: str, k8s_cli_string: str) -> str:
    """k8s_kubectl_describe_node runs `kubectl describe node` for a node.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type node_name: str
    :param node_name: Node Name.
    :type k8s_cli_string: str
    :param k8s_cli_string: Command template; `{node_name}` is substituted
    :rtype: String, output of the command in python string format, or
        empty string in case of error.
    """
    command = k8s_cli_string.format(node_name=node_name)
    result = handle.run_native_cmd(command)
    if result is None:
        print(f"Error while executing command ({command}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {command} {result.stderr}")
    return result.stdout
================================================
FILE: Kubernetes/legos/k8s_kubectl_describe_pod/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_describe_pod/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_describe_pod/k8s_kubectl_describe_pod.json
================================================
{
"action_title": "Kubectl describe a pod",
"action_description": "Kubectl describe a pod",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_describe_pod",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_POD"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_describe_pod/k8s_kubectl_describe_pod.py
================================================
from pprint import pprint
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    pod_name: str = Field(title='Pod Name', description='Pod Name')
    # Command template; {pod_name} and {namespace} are filled in from the
    # other fields.
    k8s_cli_string: str = Field(
        default='kubectl describe pod {pod_name} -n {namespace}',
        title='Kubectl Command',
        description='kubectl describe a pod'
    )
    namespace: str = Field(title='Namespace', description='Namespace')
def k8s_kubectl_describe_pod_printer(data: str):
    """Pretty-print the pod description under a header; None prints nothing."""
    if data is None:
        return
    print("Pod Details:")
    pprint(data)
def k8s_kubectl_describe_pod(handle, pod_name: str, k8s_cli_string: str, namespace: str) -> str:
    """k8s_kubectl_describe_pod executes the given kubectl describe command

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type pod_name: str
    :param pod_name: Pod Name.
    :type k8s_cli_string: str
    :param k8s_cli_string: kubectl describe pod {pod_name} -n {namespace}.
    :type namespace: str
    :param namespace: Namespace.
    :rtype: String, Output of the command in python string format or
        Empty String in case of Error.
    """
    k8s_cli_string = k8s_cli_string.format(
        pod_name=pod_name, namespace=namespace)
    result = handle.run_native_cmd(k8s_cli_string)
    # Guard against an empty response, consistent with the other kubectl
    # legos in this repo; without it result.stderr raises AttributeError.
    if result is None:
        print(
            f"Error while executing command ({k8s_cli_string}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(
            f"Error occurred while executing command {result.stderr}")
    data = result.stdout
    return data
================================================
FILE: Kubernetes/legos/k8s_kubectl_drain_node/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_drain_node/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_drain_node/k8s_kubectl_drain_node.json
================================================
{
"action_title": "Kubectl drain a node",
"action_description": "Kubectl drain a node",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_drain_node",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_NODE"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_drain_node/k8s_kubectl_drain_node.py
================================================
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Inputs for the 'Kubectl drain a node' action."""
    # Command template; {node_name} is filled in at run time.
    k8s_cli_string: str = Field(
        title='Kubectl Command',
        description='kubectl drain a node in preparation of a maintenance',
        default='kubectl drain {node_name}'
    )
    # Node to drain.
    node_name: str = Field(
        title='Node Name',
        description='Node Name'
    )
def k8s_kubectl_drain_node_printer(data: str):
    """Print the drain output, or an error note when there is none."""
    if data is not None:
        print(data)
    else:
        print("Error while executing command")
def k8s_kubectl_drain_node(handle, k8s_cli_string: str, node_name:str) -> str:
    """k8s_kubectl_drain_node runs the supplied kubectl drain command.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: kubectl drain {node_name}.
    :type node_name: str
    :param node_name: Node Name.
    :rtype: String, Output of the command in python string format or
        Empty String in case of Error.
    """
    cmd = k8s_cli_string.format(node_name=node_name)
    result = handle.run_native_cmd(cmd)
    if result is None:
        print(f"Error while executing command ({cmd}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {cmd} {result.stderr}")
    return result.stdout
================================================
FILE: Kubernetes/legos/k8s_kubectl_exec_command/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_exec_command/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_exec_command/k8s_kubectl_exec_command.json
================================================
{
"action_title": "Execute command on a pod",
"action_description": "Execute command on a pod",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_exec_command",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_exec_command/k8s_kubectl_exec_command.py
================================================
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Inputs for the 'Execute command on a pod' action."""
    # Command template; {pod_name}, {command} and {namespace} are filled
    # in at run time.
    k8s_cli_string: str = Field(
        title='Kubectl Command',
        description='kubectl execute a command in pod',
        default='kubectl exec {pod_name} {command} -n {namespace}'
    )
    # Pod in which the command runs.
    pod_name: str = Field(
        title='Pod Name',
        description='Pod Name'
    )
    # The command to execute inside the pod.
    command: str = Field(
        title='Command',
        description='Command'
    )
    # Namespace containing the pod.
    namespace: str = Field(
        title='Namespace',
        description='Namespace'
    )
def k8s_kubectl_exec_command_printer(data: str):
    """Print the exec output, or an error note when there is none."""
    if data is not None:
        print(data)
    else:
        print("Error while executing command")
def k8s_kubectl_exec_command(
        handle,
        k8s_cli_string: str,
        pod_name:str,
        command: str,
        namespace: str
    ) -> str:
    """k8s_kubectl_exec_command runs the given command inside a pod.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: kubectl exec {pod_name} {command} -n {namespace}.
    :type pod_name: str
    :param pod_name: Pod Name.
    :type command: str
    :param command: Command.
    :type namespace: str
    :param namespace: Namespace.
    :rtype: String, Output of the command in python string format or
        Empty String in case of Error.
    """
    cmd = k8s_cli_string.format(pod_name=pod_name, command=command, namespace=namespace)
    result = handle.run_native_cmd(cmd)
    if result is None:
        print(f"Error while executing command ({cmd}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {cmd} {result.stderr}")
    return result.stdout
================================================
FILE: Kubernetes/legos/k8s_kubectl_get_api_resources/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_get_api_resources/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_get_api_resources/k8s_kubectl_get_api_resources.json
================================================
{
"action_title": "Kubectl get api resources",
"action_description": "Kubectl get api resources",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_get_api_resources",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_get_api_resources/k8s_kubectl_get_api_resources.py
================================================
from pprint import pprint
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Inputs for the 'Kubectl get api resources' action."""
    # Command template; {namespace} is filled in at run time.
    k8s_cli_string: str = Field(
        title='Kubectl Command',
        description='kubectl get api resources',
        default='kubectl api-resources -o wide -n {namespace}'
    )
    # Namespace to scope the query to.
    namespace: str = Field(
        title='Namespace',
        description='Namespace',
    )
def k8s_kubectl_get_api_resources_printer(data: str):
    """Pretty-print the api-resources output, or an error note when absent."""
    if data is not None:
        pprint (data)
    else:
        print("Error while executing command")
def k8s_kubectl_get_api_resources(handle, k8s_cli_string: str, namespace: str) -> str:
    """k8s_kubectl_get_api_resources runs the api-resources kubectl command.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: kubectl api-resources -o wide -n {namespace}.
    :type namespace: str
    :param namespace: Namespace.
    :rtype: String, Output of the command in python string format or
        Empty String in case of Error.
    """
    cmd = k8s_cli_string.format(namespace=namespace)
    result = handle.run_native_cmd(cmd)
    if result is None:
        print(f"Error while executing command ({cmd}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {cmd} {result.stderr}")
    return result.stdout
================================================
FILE: Kubernetes/legos/k8s_kubectl_get_logs/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_get_logs/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_get_logs/k8s_kubectl_get_logs.json
================================================
{
"action_title": "Kubectl get logs",
"action_description": "Kubectl get logs for a given pod",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_get_logs",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_get_logs/k8s_kubectl_get_logs.py
================================================
from pprint import pprint
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Inputs for the 'Kubectl get logs' action."""
    # Command template; {pod_name} and {namespace} are filled in at run
    # time. The previous default carried embedded double quotes
    # ('"kubectl logs ..."'), which made the executed command invalid.
    k8s_cli_string: str = Field(
        title='Kubectl Command',
        description='kubectl get logs for a given pod',
        default='kubectl logs {pod_name} -n {namespace}'
    )
    # Pod whose logs are fetched.
    pod_name: str = Field(
        title='Pod Name',
        description='Pod Name'
    )
    # Namespace containing the pod.
    namespace: str = Field(
        title='Namespace',
        description='Namespace'
    )
def k8s_kubectl_get_logs_printer(data: str):
    """Pretty-print the fetched pod logs; silently skip a None result."""
    if data is not None:
        print("Logs:")
        pprint(data)
def k8s_kubectl_get_logs(handle, k8s_cli_string: str, pod_name: str, namespace:str) -> str:
    """k8s_kubectl_get_logs fetches logs for a pod via kubectl.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: kubectl logs {pod_name} -n {namespace}.
    :type pod_name: str
    :param pod_name: Pod Name.
    :type namespace: str
    :param namespace: Namespace.
    :rtype: String, Output of the command in python string format or
        Empty String in case of Error.
    """
    cmd = k8s_cli_string.format(pod_name=pod_name, namespace=namespace)
    result = handle.run_native_cmd(cmd)
    if result is None:
        print(f"Error while executing command ({cmd}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {cmd} {result.stderr}")
    return result.stdout
================================================
FILE: Kubernetes/legos/k8s_kubectl_get_service_namespace/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_get_service_namespace/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_get_service_namespace/k8s_kubectl_get_service_namespace.json
================================================
{
"action_title": "Kubectl get services",
"action_description": "Kubectl get services in a given namespace",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_get_service_namespace",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_NAMESPACE"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_get_service_namespace/k8s_kubectl_get_service_namespace.py
================================================
import io
import pandas as pd
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Inputs for the 'Kubectl get services' action."""
    # Command template; {namespace} is filled in at run time.
    k8s_cli_string: str = Field(
        title='Kubectl Command',
        description='kubectl list services in current namespace',
        default='kubectl get service -n {namespace}'
    )
    # Namespace to list services in.
    namespace: str = Field(
        title='Namespace',
        description='Namespace'
    )
def k8s_kubectl_get_service_namespace_printer(data: list):
    """Print one service name per line under a header; skip a None result."""
    if data is None:
        return
    print("Service List:")
    for svc_name in data:
        print(f"\t {svc_name}")
def k8s_kubectl_get_service_namespace(handle, k8s_cli_string: str, namespace: str) -> list:
    """k8s_kubectl_get_service_namespace lists service names in a namespace.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: kubectl get service -n {namespace}.
    :type namespace: str
    :param namespace: Namespace.
    :rtype: list, Names of the services in the given namespace, or an
        empty list in case of Error.
    """
    k8s_cli_string = k8s_cli_string.format(namespace=namespace)
    result = handle.run_native_cmd(k8s_cli_string)
    if result is None:
        print(
            f"Error while executing command ({k8s_cli_string}) (empty response)")
        return []
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {k8s_cli_string} {result.stderr}")
    # kubectl prints a fixed-width table; parse it and keep the NAME column.
    df = pd.read_fwf(io.StringIO(result.stdout))
    return df['NAME'].tolist()
================================================
FILE: Kubernetes/legos/k8s_kubectl_list_pods/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_list_pods/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_list_pods/k8s_kubectl_list_pods.json
================================================
{
"action_title": "Kubectl list pods",
"action_description": "Kubectl list pods in given namespace",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_list_pods",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_POD"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_list_pods/k8s_kubectl_list_pods.py
================================================
import io
import pandas as pd
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Inputs for the 'Kubectl list pods' action."""
    # Command template; {namespace} is filled in at run time.
    k8s_cli_string: str = Field(
        title='Kubectl Command',
        description='kubectl List pods in given namespace',
        default='kubectl get pods -n {namespace}'
    )
    # Namespace to list pods in.
    namespace: str = Field(
        title='Namespace',
        description='Namespace'
    )
def k8s_kubectl_list_pods_printer(data: list):
    """Print one pod name per line under a header; skip a None result."""
    if data is None:
        return
    print("POD List:")
    for pod_name in data:
        print(f"\t {pod_name}")
def k8s_kubectl_list_pods(handle, k8s_cli_string: str, namespace: str) -> list:
    """k8s_kubectl_list_pods lists the pod names in the given namespace.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: kubectl get pods -n {namespace}.
    :type namespace: str
    :param namespace: Namespace.
    :rtype: list, Pod names in the given namespace, or an empty list in
        case of Error.
    """
    k8s_cli_string = k8s_cli_string.format(namespace=namespace)
    result = handle.run_native_cmd(k8s_cli_string)
    if result is None:
        print(
            f"Error while executing command ({k8s_cli_string}) (empty response)")
        return []
    if result.stderr:
        raise ApiException(
            f"Error occurred while executing command {k8s_cli_string} {result.stderr}")
    # kubectl prints a fixed-width table; parse it and keep the NAME column.
    df = pd.read_fwf(io.StringIO(result.stdout))
    return df['NAME'].tolist()
================================================
FILE: Kubernetes/legos/k8s_kubectl_patch_pod/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_patch_pod/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_patch_pod/k8s_kubectl_patch_pod.json
================================================
{
"action_title": "Kubectl update field",
"action_description": "Kubectl update field of a resource using strategic merge patch",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_patch_pod",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_POD"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_patch_pod/k8s_kubectl_patch_pod.py
================================================
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Inputs for the 'Kubectl update field' (patch) action."""
    # Command template; {pod_name}, {patch} and {namespace} are filled in
    # at run time. The patch is single-quoted for the shell.
    k8s_cli_string: str = Field(
        title='Kubectl Command',
        description='kubectl update field of a resource using strategic merge patch',
        default="kubectl patch pod {pod_name} -p '{patch}' -n {namespace}"
    )
    # Pod to patch.
    pod_name: str = Field(
        title='Pod Name',
        description='Pod Name'
    )
    # Strategic-merge patch document to apply.
    patch: str = Field(
        title='Patch',
        description='The patch to be applied to the resource'
    )
    # Namespace containing the pod.
    namespace: str = Field(
        title='Namespace',
        description='Namespace'
    )
def k8s_kubectl_patch_pod_printer(data: str):
    """Print the patch output, or an error note when there is none."""
    if data is not None:
        print(data)
    else:
        print("Error while executing command")
def k8s_kubectl_patch_pod(
        handle,
        k8s_cli_string: str,
        pod_name:str,
        patch: str,
        namespace: str
    ) -> str:
    """k8s_kubectl_patch_pod applies a strategic merge patch to a pod.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: kubectl patch pod {pod_name} -p '{patch}' -n {namespace}.
    :type pod_name: str
    :param pod_name: Pod Name.
    :type patch: str
    :param patch: The patch to be applied to the resource.
    :type namespace: str
    :param namespace: Namespace.
    :rtype: String, Output of the command in python string format or
        Empty String in case of Error.
    """
    cmd = k8s_cli_string.format(pod_name=pod_name, patch=patch, namespace=namespace)
    result = handle.run_native_cmd(cmd)
    if result is None:
        print(f"Error while executing command ({cmd}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {cmd} {result.stderr}")
    return result.stdout
================================================
FILE: Kubernetes/legos/k8s_kubectl_rollout_deployment/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://unskript.com)
================================================
FILE: Kubernetes/legos/k8s_kubectl_rollout_deployment/__init__.py
================================================
# 2022 (c) unSkript.com
================================================
FILE: Kubernetes/legos/k8s_kubectl_rollout_deployment/k8s_kubectl_rollout_deployment.json
================================================
{ "action_title": "Kubectl rollout deployment history",
"action_description": "Kubectl rollout deployment history",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_rollout_deployment",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_rollout_deployment/k8s_kubectl_rollout_deployment.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
from pydantic import BaseModel, Field
from beartype import beartype
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Inputs for the 'Kubectl rollout deployment history' action."""
    # Command template; {deployment} and {namespace} are substituted at
    # run time. Note this field has no default, unlike the sibling legos.
    k8s_cli_string: str = Field(
        title='Kubectl Command',
        description='kubectl command '
                    'eg "kubectl get pods --all-namespaces"'
    )
    # Deployment whose rollout history is queried.
    deployment: str = Field(
        title='Deployment Name',
        description='Deployment Name'
    )
    # Namespace containing the deployment.
    namespace: str = Field(
        title='Namespace',
        description='Namespace'
    )
@beartype
def k8s_kubectl_rollout_deployment_printer(data: str):
    # Print the rollout history output, or an error note when the
    # command produced nothing.
    if data is None:
        print("Error while executing command")
        return
    print (data)
@beartype
def k8s_kubectl_rollout_deployment(
        handle,
        k8s_cli_string: str,
        deployment: str,
        namespace: str
    ) -> str:
    """k8s_kubectl_rollout_deployment executes the given kubectl rollout command

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: kubectl command template; {deployment} and
        {namespace} are substituted before execution.
    :type deployment: str
    :param deployment: Deployment Name.
    :type namespace: str
    :param namespace: Namespace.
    :rtype: String, Output of the command in python string format or
        Empty String in case of Error.
    """
    k8s_cli_string = k8s_cli_string.format(deployment=deployment, namespace=namespace)
    result = handle.run_native_cmd(k8s_cli_string)
    if result is None:
        print(
            f"Error while executing command ({k8s_cli_string}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(
            f"Error occurred while executing command {k8s_cli_string} {result.stderr}")
    return result.stdout
## Duplicate code?
================================================
FILE: Kubernetes/legos/k8s_kubectl_scale_deployment/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_scale_deployment/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_scale_deployment/k8s_kubectl_scale_deployment.json
================================================
{
"action_title": "Kubectl scale deployment",
"action_description": "Kubectl scale a given deployment",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_scale_deployment",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_scale_deployment/k8s_kubectl_scale_deployment.py
================================================
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Inputs for the 'Kubectl scale deployment' action."""
    # Command template; {num}, {deployment} and {namespace} are filled in
    # at run time.
    k8s_cli_string: str = Field(
        title='Kubectl Command',
        description='kubectl Scale a given deployment',
        default='kubectl scale --replicas={num} deployment {deployment} -n {namespace}'
    )
    # Desired replica count (kept as a string for template substitution).
    num: str = Field(
        title='Specified Size',
        description='Specified Size'
    )
    # Deployment to scale.
    deployment: str = Field(
        title='Deployment Name',
        description='Deployment Name'
    )
    # Namespace containing the deployment.
    namespace: str = Field(
        title='Namespace',
        description='Namespace'
    )
def k8s_kubectl_scale_deployment_printer(data: str):
    """Print the scale output, or an error note when there is none."""
    if data is not None:
        print(data)
    else:
        print("Error while executing command")
def k8s_kubectl_scale_deployment(
        handle,
        k8s_cli_string: str,
        num: str,
        deployment: str,
        namespace:str
    ) -> str:
    """k8s_kubectl_scale_deployment scales a deployment to a replica count.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: kubectl scale --replicas={num} deployment {deployment} -n {namespace}.
    :type num: str
    :param num: Specified Size.
    :type deployment: str
    :param deployment: Deployment Name.
    :type namespace: str
    :param namespace: Namespace.
    :rtype: String, Output of the command in python string format or Empty String in case of Error.
    """
    cmd = k8s_cli_string.format(num=num, deployment=deployment, namespace=namespace)
    result = handle.run_native_cmd(cmd)
    if result is None:
        print(f"Error while executing command ({cmd}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {cmd} {result.stderr}")
    return result.stdout
================================================
FILE: Kubernetes/legos/k8s_kubectl_show_metrics_node/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_show_metrics_node/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_show_metrics_node/k8s_kubectl_show_metrics_node.json
================================================
{
"action_title": "Kubectl show metrics",
"action_description": "Kubectl show metrics for a given node",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_show_metrics_node",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_NODE"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_show_metrics_node/k8s_kubectl_show_metrics_node.py
================================================
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Inputs for the 'Kubectl show metrics' (node) action."""
    # Command template; {node_name} is filled in at run time.
    k8s_cli_string: str = Field(
        title='Kubectl Command',
        description='kubectl Show metrics for a given node',
        default='kubectl top node {node_name}'
    )
    # Node whose resource metrics are shown.
    node_name: str = Field(
        title='Node Name',
        description='Node Name'
    )
def k8s_kubectl_show_metrics_node_printer(data: str):
    """Print the node metrics, or an error note when there are none."""
    if data is not None:
        print(data)
    else:
        print("Error while executing command")
def k8s_kubectl_show_metrics_node(handle, k8s_cli_string: str, node_name: str) -> str:
    """k8s_kubectl_show_metrics_node shows resource metrics for a node.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: kubectl top node {node_name}.
    :type node_name: str
    :param node_name: Node Name.
    :rtype: String, Output of the command in python string format or
        Empty String in case of Error.
    """
    cmd = k8s_cli_string.format(node_name=node_name)
    result = handle.run_native_cmd(cmd)
    if result is None:
        print(f"Error while executing command ({cmd}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {cmd} {result.stderr}")
    return result.stdout
================================================
FILE: Kubernetes/legos/k8s_kubectl_show_metrics_pod/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_kubectl_show_metrics_pod/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_kubectl_show_metrics_pod/k8s_kubectl_show_metrics_pod.json
================================================
{
"action_title": "Kubectl show metrics",
"action_description": "Kubectl show metrics for a given pod",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_kubectl_show_metrics_pod",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_POD"]
}
================================================
FILE: Kubernetes/legos/k8s_kubectl_show_metrics_pod/k8s_kubectl_show_metrics_pod.py
================================================
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Inputs for the 'Kubectl show metrics' (pod) action."""
    # Command template; {pod_name} and {namespace} are filled in at run time.
    k8s_cli_string: str = Field(
        title='Kubectl Command',
        description='kubectl show metrics for a given pod',
        default='kubectl top pod {pod_name} -n {namespace}'
    )
    # Pod whose resource metrics are shown.
    pod_name: str = Field(
        title='Pod Name',
        description='Pod Name'
    )
    # Namespace containing the pod.
    namespace: str = Field(
        title='Namespace',
        description='Namespace'
    )
def k8s_kubectl_show_metrics_pod_printer(data: str):
    """Print the pod metrics, or an error note when there are none."""
    if data is not None:
        print(data)
    else:
        print("Error while executing command")
def k8s_kubectl_show_metrics_pod(
        handle,
        k8s_cli_string: str,
        pod_name:str,
        namespace:str
    ) -> str:
    """k8s_kubectl_show_metrics_pod executes the given kubectl command

    (The previous docstring misnamed this function as
    k8s_kubectl_show_metrics_node.)

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type k8s_cli_string: str
    :param k8s_cli_string: kubectl top pod {pod_name} -n {namespace}.
    :type pod_name: str
    :param pod_name: Pod Name.
    :type namespace: str
    :param namespace: Namespace.
    :rtype: String, Output of the command in python string format or
        Empty String in case of Error.
    """
    k8s_cli_string = k8s_cli_string.format(pod_name=pod_name, namespace=namespace)
    result = handle.run_native_cmd(k8s_cli_string)
    if result is None:
        print(
            f"Error while executing command ({k8s_cli_string}) (empty response)")
        return ""
    if result.stderr:
        raise ApiException(
            f"Error occurred while executing command {k8s_cli_string} {result.stderr}")
    return result.stdout
================================================
FILE: Kubernetes/legos/k8s_list_all_matching_pods/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_list_all_matching_pods/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_list_all_matching_pods/k8s_list_all_matching_pods.json
================================================
{
"action_title": "List matching name pods",
"action_description": "List all pods matching a particular name string. The matching string can be a regular expression too",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_list_all_matching_pods",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_POD"]
}
================================================
FILE: Kubernetes/legos/k8s_list_all_matching_pods/k8s_list_all_matching_pods.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
import re
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from tabulate import tabulate
from kubernetes import client
pp = pprint.PrettyPrinter(indent=2)
class InputSchema(BaseModel):
    """Input for k8s_list_all_matching_pods."""
    # Namespace to search; the sentinel value 'all' (the default) is meant
    # to search every namespace — NOTE(review): confirm the action handles
    # 'all' specially rather than as a literal namespace name.
    namespace: Optional[str] = Field(
        default='all',
        title='Namespace',
        description='Kubernetes namespace')
    # Pod-name filter; interpreted as a regular expression by the action.
    matchstr: str = Field(
        title='Match String',
        description='''
        Matching name string. The matching string can be a regular expression too.
        For eg. ^[a-zA-Z0-9]+$ //string consists only of alphanumerics.
        ''')
def k8s_list_all_matching_pods_printer(output):
    """Pretty-print the (pods, rows) tuple from k8s_list_all_matching_pods."""
    if output is None:
        return
    pods, rows = output
    if len(pods) > 0:
        print("\n")
        column_names = ["Pod Ip", "Namespace", "Name", "Status", "Start Time"]
        print(tabulate(rows, tablefmt="grid", headers=column_names))
    if not rows:
        pp.pprint("No Matching Pods !!!")
def k8s_list_all_matching_pods(handle, matchstr: str, namespace: str = 'all') -> Tuple:
    """k8s_list_all_matching_pods lists all pods whose name matches a pattern.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type matchstr: str
    :param matchstr: Matching name string. The matching string can be a regular expression too.
    :type namespace: str
    :param namespace: Kubernetes namespace. The special value 'all'
        (the default) searches pods across every namespace.
    :rtype: Tuple of (matching pod objects, tabular rows of
        [pod_ip, namespace, name, phase, start_time]).
    """
    core_api = client.CoreV1Api(api_client=handle)
    # 'all' is a sentinel meaning "every namespace", not a namespace
    # literally named "all"; the original passed it straight to
    # list_namespaced_pod and silently returned nothing.
    if namespace == 'all':
        res = core_api.list_pod_for_all_namespaces(pretty=True)
    else:
        res = core_api.list_namespaced_pod(namespace=namespace, pretty=True)
    pattern = re.compile(fr'({matchstr})')
    match_pods = [pod for pod in res.items
                  if pattern.search(pod.metadata.name) is not None]
    data = [[pod.status.pod_ip, pod.metadata.namespace, pod.metadata.name,
             pod.status.phase, pod.status.start_time]
            for pod in match_pods]
    return (match_pods, data)
================================================
FILE: Kubernetes/legos/k8s_list_pvcs/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_list_pvcs/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_list_pvcs/k8s_list_pvcs.json
================================================
{
"action_title": "List pvcs",
"action_description": "List pvcs by namespace. By default, it will list all pvcs in all namespaces.",
"action_type": "LEGO_TYPE_K8S",
"action_version": "2.0.0",
"action_entry_function": "k8s_list_pvcs",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_PVC"]
}
================================================
FILE: Kubernetes/legos/k8s_list_pvcs/k8s_list_pvcs.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
import pprint
from typing import Optional, List
from pydantic import BaseModel, Field
from kubernetes.client.rest import ApiException
class InputSchema(BaseModel):
    """Input for k8s_list_pvcs."""
    # Empty string (the default) lists PVCs across all namespaces
    # (the action then uses `kubectl get pvc -A`).
    namespace: Optional[str] = Field(
        default='',
        title='Namespace',
        description='Kubernetes namespace')
def k8s_list_pvcs_printer(output):
    """Pretty-print the PVC list returned by k8s_list_pvcs; no-op on None."""
    if output is not None:
        pprint.pprint(output)
def k8s_list_pvcs(handle, namespace: str = '') -> List:
    """k8s_list_pvcs lists PVCs, optionally restricted to one namespace.

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type namespace: str
    :param namespace: Kubernetes namespace. An empty string (the default)
        lists PVCs across all namespaces.
    :rtype: List of dicts with keys "Namespace" and "Name".
    """
    import shlex  # local import: only needed to quote the namespace safely

    # Shared jsonpath template: one "namespace,name" pair per line.
    jsonpath = ('--output=jsonpath=\'{range .items[*]}'
                '{@.metadata.namespace}{","}{@.metadata.name}{"\\n"}{end}\'')
    if namespace == '':
        kubectl_command = f"kubectl get pvc -A {jsonpath}"
    else:
        # Quote the namespace so a malformed value cannot inject shell syntax.
        kubectl_command = f"kubectl get pvc -n {shlex.quote(namespace)} {jsonpath}"
    result = handle.run_native_cmd(kubectl_command)
    if result is None:
        print(
            f"Error while executing command ({kubectl_command}) (empty response)")
        return []
    if result.stderr:
        raise ApiException(f"Error occurred while executing command {kubectl_command} {result.stderr}")
    names_list = [y for y in (x.strip() for x in result.stdout.splitlines()) if y]
    output = []
    for entry in names_list:
        ns, name = entry.split(",")
        output.append({"Namespace": ns, "Name": name})
    return output
================================================
FILE: Kubernetes/legos/k8s_measure_worker_node_network_bandwidth/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_measure_worker_node_network_bandwidth/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_measure_worker_node_network_bandwidth/k8s_measure_worker_node_network_bandwidth.json
================================================
{
"action_title": "Measure K8s worker node network bandwidth",
"action_description": "Measures the network bandwidth for each worker node using a DaemonSet and returns the results.",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_measure_worker_node_network_bandwidth",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_INFORMATION" ]
}
================================================
FILE: Kubernetes/legos/k8s_measure_worker_node_network_bandwidth/k8s_measure_worker_node_network_bandwidth.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from typing import List
from kubernetes import client
from kubernetes.client.rest import ApiException
from tabulate import tabulate
import time
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input for k8s_measure_worker_node_network_bandwidth."""
    # Namespace in which the 'bandwidth-tester' DaemonSet is created.
    namespace_to_check_bandwidth: str = Field(description='The namespace where the DaemonSet will be deployed.', title='Namespace')
def pods_have_written_results(handle, core_v1, label_selector, namespace,
                              timeout=150, initial_delay=5, poll_interval=2) -> bool:
    """Poll until every pod matching the selector has written its result file.

    A pod is considered done when /results/time.txt is present; the presence
    of in_progress.txt (or any missing file listing) means it is still working.

    :param handle: Object returned from the Task validate method; used to run kubectl.
    :param core_v1: CoreV1Api client used to list the pods.
    :param label_selector: Label selector identifying the DaemonSet pods.
    :param namespace: Namespace the pods run in.
    :param timeout: Maximum seconds to poll before giving up.
    :param initial_delay: Seconds to wait before the first poll (lets pods start).
    :param poll_interval: Seconds between successive polls.
    :rtype: bool -- True once every pod has /results/time.txt, False on timeout.
    """
    end_time = time.time() + timeout
    # Give the pods a moment to come up before the first check.
    time.sleep(initial_delay)
    while time.time() < end_time:
        pods = core_v1.list_namespaced_pod(namespace=namespace, label_selector=label_selector).items
        all_written_results = True
        for pod in pods:
            pod_name = pod.metadata.name
            check_files_command = f"kubectl exec -n {namespace} {pod_name} -- ls /results/"
            result = handle.run_native_cmd(check_files_command)
            # A failed command (None result) or an absent time.txt means this
            # pod has not finished; stop checking the rest and retry later.
            # (The original dereferenced result.stdout without the None guard.)
            if result is None or "time.txt" not in result.stdout:
                all_written_results = False
                break
        if all_written_results:
            return True
        time.sleep(poll_interval)
    return False
def k8s_measure_worker_node_network_bandwidth_printer(output):
    """Print the network bandwidth results in tabular format.

    Accepts a list of {'Node', 'Bandwidth'} dicts, a single-element list
    containing an error string, or an empty/None result.
    """
    if not output:
        # Covers None and the empty list; the original indexed output[0]
        # on an empty list and raised IndexError.
        print("No data available or access denied.")
        return
    if isinstance(output, list) and isinstance(output[0], str):
        # Error-message path (e.g. the "Forbidden" string from the action).
        print(output[0])
        return
    headers = ["Node", "Bandwidth"]
    table_data = [[entry['Node'], entry['Bandwidth'].replace('Time taken: ', '')]
                  for entry in output]
    print(tabulate(table_data, headers=headers, tablefmt='grid'))
def k8s_measure_worker_node_network_bandwidth(handle, namespace_to_check_bandwidth: str) -> List:
    """
    k8s_measure_worker_node_network_bandwidth measures the network bandwidth for each worker node using a DaemonSet and returns the results.

    Deploys a 'bandwidth-tester' DaemonSet whose pods download a 100MB test
    file and record the elapsed seconds in /results/time.txt, waits for all
    pods to finish, collects the timings with kubectl exec, then deletes the
    DaemonSet (including on error paths).

    :type handle: object
    :param handle: Object returned from the Task validate method
    :type namespace_to_check_bandwidth: str
    :param namespace_to_check_bandwidth: The namespace where the DaemonSet will be deployed.
    :return: List containing node and bandwidth details, or a single-element
        list with an error string when the service account lacks permission.
    """
    # DaemonSet spec to run our bandwidth test.  Each pod touches
    # in_progress.txt while downloading and replaces it with time.txt
    # (containing the elapsed seconds) when done.
    daemonset = {
        "apiVersion": "apps/v1",
        "kind": "DaemonSet",
        "metadata": {"name": "bandwidth-tester"},
        "spec": {
            "selector": {"matchLabels": {"app": "bandwidth-tester"}},
            "template": {
                "metadata": {"labels": {"app": "bandwidth-tester"}},
                "spec": {
                    "containers": [
                        {
                            "name": "tester",
                            "image": "appropriate/curl",
                            "command": [
                                "sh",
                                "-c",
                                ("touch /results/in_progress.txt && "
                                 "start_time=$(date +%s) && "
                                 "curl -O https://speed.hetzner.de/100MB.bin && "
                                 "end_time=$(date +%s) && "
                                 "duration=$((end_time - start_time)) && "
                                 "echo 'Time taken: '$duration' seconds' > /results/time.txt && "
                                 "rm /results/in_progress.txt")
                            ],
                            "volumeMounts": [{"name": "results", "mountPath": "/results"}],
                        }
                    ],
                    "volumes": [{"name": "results", "emptyDir": {}}],
                },
            },
        },
    }
    v1 = client.AppsV1Api(api_client=handle)
    core_v1 = client.CoreV1Api(api_client=handle)
    try:
        # Best-effort delete of any leftover DaemonSet from a previous run.
        try:
            v1.delete_namespaced_daemon_set(name="bandwidth-tester", namespace=namespace_to_check_bandwidth,
                                            propagation_policy="Foreground", grace_period_seconds=0)
        except ApiException as ae:
            if ae.status == 404:  # Not Found error: nothing to clean up.
                print(f"Checking for an existing DaemonSet 'bandwidth-tester' in namespace {namespace_to_check_bandwidth}...")
            elif ae.status == 403:
                # No permission: report via the return value instead of raising.
                return ["Forbidden: The service account does not have permission to create/delete daemonset."]
            else:
                raise ae
        print(f"Deploying DaemonSet 'bandwidth-tester' in namespace {namespace_to_check_bandwidth}...")
        v1.create_namespaced_daemon_set(namespace=namespace_to_check_bandwidth, body=daemonset)
        print("Waiting for DaemonSet to run on all nodes...")
        if not pods_have_written_results(handle, core_v1, "app=bandwidth-tester", namespace_to_check_bandwidth):
            print("Timeout waiting for pods to write results.")
            return []
        # Collect results from each tester pod.
        pods = core_v1.list_namespaced_pod(namespace=namespace_to_check_bandwidth, label_selector="app=bandwidth-tester").items
        results = []
        for pod in pods:
            pod_name = pod.metadata.name
            # Retry the exec for pods that are not Running yet or whose
            # result file read transiently fails.
            retry_count = 0
            max_retries = 20
            delay_between_retries = 5
            while retry_count < max_retries:
                print(f"Fetching results from pod: {pod_name}, status: {pod.status.phase}")
                if pod.status.phase != "Running":
                    time.sleep(delay_between_retries)
                    retry_count += 1
                    continue
                fetch_results_command = f"kubectl exec -n {namespace_to_check_bandwidth} {pod.metadata.name} -- cat /results/time.txt"
                fetch_output = handle.run_native_cmd(fetch_results_command)
                if fetch_output and not fetch_output.stderr:
                    bandwidth = fetch_output.stdout.strip()
                    results.append({"Node": pod.spec.node_name, "Bandwidth": bandwidth})
                    break
                else:
                    retry_count += 1
                    print(f"Retrying in {delay_between_retries} seconds...")
                    time.sleep(delay_between_retries)
        print("\nCleaning up: Deleting the DaemonSet after collecting results...\n")
        v1.delete_namespaced_daemon_set(name="bandwidth-tester", namespace=namespace_to_check_bandwidth,
                                        propagation_policy="Foreground", grace_period_seconds=0)
        return results
    except Exception as e:
        print("An error occurred. Performing cleanup...")
        # Cleanup in case of exceptions: Ensure that DaemonSet is deleted
        try:
            v1.delete_namespaced_daemon_set(name="bandwidth-tester", namespace=namespace_to_check_bandwidth,
                                            propagation_policy="Foreground", grace_period_seconds=0)
        except Exception as cleanup_err:
            print(f"Error during cleanup: {cleanup_err}")
        raise e
================================================
FILE: Kubernetes/legos/k8s_remove_pod_from_deployment/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Kubernetes/legos/k8s_remove_pod_from_deployment/__init__.py
================================================
================================================
FILE: Kubernetes/legos/k8s_remove_pod_from_deployment/k8s_remove_pod_from_deployment.json
================================================
{
"action_title": "Remove POD from Deployment",
"action_description": "Remove POD from Deployment",
"action_type": "LEGO_TYPE_K8S",
"action_entry_function": "k8s_remove_pod_from_deployment",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_K8S","CATEGORY_TYPE_K8S_KUBECTL","CATEGORY_TYPE_K8S_POD"]
}
================================================
FILE: Kubernetes/legos/k8s_remove_pod_from_deployment/k8s_remove_pod_from_deployment.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from kubernetes import client
class InputSchema(BaseModel):
    """Input for k8s_remove_pod_from_deployment."""
    # Name of the pod to detach from its Deployment.
    pod_name: str = Field(
        title="Pod Name",
        description="K8S Pod Name"
    )
    # Namespace the pod lives in.
    namespace: str = Field(
        title="Namespace",
        description="K8S Namespace where the POD exists"
    )
def k8s_remove_pod_from_deployment_printer(output):
    """Pretty-print the action output; skips falsy (empty/None) results."""
    if output:
        pprint.pprint(output)
def k8s_remove_pod_from_deployment(handle, pod_name: str, namespace: str):
    """k8s_remove_pod_from_deployment removes the given POD in a namespace
    from its Deployment.

    Kubernetes Deployments select their pods through label selectors. By
    suffixing the pod's matching label values with '-out-for-maintenance',
    the pod no longer matches the Deployment's selector, so the Deployment
    releases it (and its ReplicaSet starts a replacement) while the pod
    itself keeps running and can be inspected.

    :type handle: Object
    :param handle: Object returned from task.validate(...) routine
    :type pod_name: str
    :param pod_name: Name of the K8S POD (Mandatory parameter)
    :type namespace: str
    :param namespace: Namespace where the above K8S POD is found (Mandatory parameter)
    :rtype: None
    """
    if not pod_name or not namespace:
        raise Exception("Pod Name and Namespace are Mandatory fields")
    core_api = client.CoreV1Api(api_client=handle)
    apps_api = client.AppsV1Api(api_client=handle)
    pod = core_api.read_namespaced_pod(name=pod_name, namespace=namespace)
    owner_references = pod.metadata.owner_references
    deployment_name = ''
    # Guard the empty list too: the original indexed owner_references[0]
    # whenever it was a list and crashed on ownerless pods.
    if isinstance(owner_references, list) and owner_references:
        owner = owner_references[0]
        if owner.kind == 'Deployment':
            deployment_name = owner.name
        else:
            raise Exception(f"Unexpected owner_references kind in pod metadata {pod.metadata.owner_references} Only Deployment is supported")
    if deployment_name == '':
        print(f"ERROR: Could not remove {pod_name} from its deployment in {namespace} ")
        return
    deployment = apps_api.read_namespaced_deployment(
        name=deployment_name,
        namespace=namespace
    )
    # Labels shared between the Deployment's selector and the pod are the
    # ones binding the pod to the Deployment; rewrite exactly those.
    common_labels = set(deployment.spec.selector.match_labels) & set(pod.metadata.labels)
    new_label = {
        label: pod.metadata.labels.get(label) + '-out-for-maintenance'
        for label in common_labels
    }
    pod.metadata.labels.update(new_label)
    core_api.patch_namespaced_pod(pod_name, namespace, pod)
================================================
FILE: Kubernetes/legos/k8s_update_command_in_pod_spec/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mantishub/legos/mantishub_get_handle/mantishub_get_handle.json
================================================
{
"action_title": "Get Mantishub handle",
"action_description": "Get Mantishub handle",
"action_type": "LEGO_TYPE_MANTISHUB",
"action_entry_function": "mantishub_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_supports_iteration": false,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MANTISHUB"]
}
================================================
FILE: Mantishub/legos/mantishub_get_handle/mantishub_get_handle.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    """mantishub_get_handle takes no inputs."""
    pass
def mantishub_get_handle(handle):
    """mantishub_get_handle returns the Mantishub handle.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :rtype: Mantishub handle.
    """
    # The validated handle is already usable as-is; hand it straight back.
    mantishub_handle = handle
    return mantishub_handle
================================================
FILE: Mongo/README.md
================================================
# Mongo Actions
* [MongoDB add new field in all collections](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_add_new_field_in_collections/README.md): MongoDB add new field in all collections
* [MongoDB Aggregate Command](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_aggregate_command/README.md): MongoDB Aggregate Command
* [MongoDB Atlas cluster cloud backup](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_atlas_cluster_backup/README.md): Trigger on-demand Atlas cloud backup
* [Get large MongoDB indices](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_check_large_index_size/README.md): This action compares the size of each index with a given threshold and returns any indexes that exceed the threshold.
* [Get MongoDB large databases](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_compare_disk_size_to_threshold/README.md): This action compares the total disk size used by MongoDB to a given threshold.
* [MongoDB Count Documents](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_count_documents/README.md): MongoDB Count Documents
* [MongoDB Create Collection](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_create_collection/README.md): MongoDB Create Collection
* [MongoDB Create Database](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_create_database/README.md): MongoDB Create Database
* [Delete collection from MongoDB database](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_delete_collection/README.md): Delete collection from MongoDB database
* [MongoDB Delete Database](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_delete_database/README.md): MongoDB Delete Database
* [MongoDB Delete Document](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_delete_document/README.md): MongoDB Delete Document
* [MongoDB Distinct Command](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_distinct_command/README.md): MongoDB Distinct Command
* [MongoDB Find Document](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_find_document/README.md): MongoDB Find Document
* [MongoDB Find One](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_find_one/README.md): MongoDB Find One returns a single entry that matches the query.
* [Get MongoDB Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_get_handle/README.md): Get MongoDB Handle
* [MongoDB get metrics](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_get_metrics/README.md): This action retrieves various metrics such as index size, disk size per collection for all databases and collections.
* [Get Mongo Server Status](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_get_server_status/README.md): Get Mongo Server Status and check for any abnormalities.
* [MongoDB Insert Document](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_insert_document/README.md): MongoDB Insert Document
* [MongoDB kill queries](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_kill_queries/README.md): MongoDB kill queries
* [Get list of collections in MongoDB Database](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_list_collections/README.md): Get list of collections in MongoDB Database
* [Get list of MongoDB Databases](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_list_databases/README.md): Get list of MongoDB Databases
* [MongoDB list queries](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_list_queries/README.md): MongoDB list queries
* [MongoDB Read Query](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_read_query/README.md): MongoDB Read Query
* [MongoDB remove a field in all collections](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_remove_field_in_collections/README.md): MongoDB remove a field in all collections
* [MongoDB Rename Database](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_rename_database/README.md): MongoDB Rename Database
* [MongoDB Update Document](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_update_document/README.md): MongoDB Update Document
* [MongoDB Upsert Query](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Mongo/legos/mongodb_write_query/README.md): MongoDB Upsert Query
================================================
FILE: Mongo/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/__init__.py
================================================
================================================
FILE: Mongo/legos/mongodb_add_new_field_in_collections/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_add_new_field_in_collections/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_add_new_field_in_collections/mongodb_add_new_field_in_collections.json
================================================
{
"action_title": "MongoDB add new field in all collections",
"action_description": "MongoDB add new field in all collections",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_add_new_field_in_collections",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true ,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_COLLECTION"]
}
================================================
FILE: Mongo/legos/mongodb_add_new_field_in_collections/mongodb_add_new_field_in_collections.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input for mongodb_add_new_field_in_collections."""
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database.'
    )
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection.'
    )
    # Fields merged into every document via a $set update.
    add_new_fields: dict = Field(
        title='Add new fields to every document',
        description='''
        The addition of fields apply in dictionary format.
        For eg: {"field":"value"}.
        '''
    )
    # Whether update_many may create a document when none match.
    upsert: bool = Field(
        True,
        title='Upsert',
        description='Allow creation of a new document, if one does not exist.'
    )
def mongodb_add_new_field_in_collections_printer(output):
    """Pretty-print documents returned by mongodb_add_new_field_in_collections."""
    if output is None:
        return
    print("\n\n")
    # Lists are printed one entry per call; anything else is printed whole.
    entries = output if isinstance(output, List) else [output]
    for entry in entries:
        pprint.pprint(entry)
def mongodb_add_new_field_in_collections(
        handle,
        database_name: str,
        collection_name: str,
        add_new_fields: dict,
        upsert: bool = True
) -> List:
    """mongodb_add_new_field_in_collections adds fields to every document in a collection.

    Issues a single update_many() with a $set of the supplied fields, then
    returns the full contents of the collection so callers can inspect the
    result.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :type collection_name: str
    :param collection_name: Name of the MongoDB collection.
    :type add_new_fields: dict
    :param add_new_fields: Mapping of field name -> value to set on every document.
    :type upsert: bool
    :param upsert: Allow creation of a new document, if one does not exist.
    :rtype: List of all documents in the collection after the update, or a
        single-element list containing the exception on failure.
    """
    modifications = {"$set": add_new_fields}
    try:
        collection = handle[database_name][collection_name]
        collection.update_many(
            {},
            update=modifications,
            upsert=upsert)
        # Materialize the cursor so callers get a plain list.
        return list(collection.find())
    except Exception as e:
        # Repo convention: surface errors as data rather than raising.
        return [e]
================================================
FILE: Mongo/legos/mongodb_aggregate_command/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_aggregate_command/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_aggregate_command/mongodb_aggregate_command.json
================================================
{
"action_title": "MongoDB Aggregate Command",
"action_description": "MongoDB Aggregate Command",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_aggregate_command",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB"]
}
================================================
FILE: Mongo/legos/mongodb_aggregate_command/mongodb_aggregate_command.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input for mongodb_aggregate_command."""
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database'
    )
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection'
    )
    # Ordered list of aggregation stages passed straight to aggregate().
    pipeline: list = Field(
        title='Pipeline',
        description='''
        A list of aggregation pipeline stages.
        For Eg. [ {
        "$group" :
        {"_id" : "$user", "num_tutorial" : {"$sum" : 1}}
        }
        ]
        In the above example, the documents are grouped on the basis of expression $user,
        and then the field num_tutorial includes the accumulator operator $sum that
        calculates the number of tutorials of each user.
        '''
    )
def mongodb_aggregate_command_printer(output):
    """Pretty-print aggregation results; lists are printed one entry per line.

    A None output is ignored, consistent with the other Mongo printers in
    this repository (the original printed "\\n\\n" followed by None).
    """
    if output is None:
        return
    print("\n\n")
    if isinstance(output, List):
        for entry in output:
            pprint.pprint(entry)
    else:
        pprint.pprint(output)
def mongodb_aggregate_command(
        handle,
        database_name: str,
        collection_name: str,
        pipeline: List
) -> List:
    """mongodb_aggregate_command runs an aggregation pipeline on a collection
    and returns every resulting document.

    (The previous docstring incorrectly claimed this used count_documents().)

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :type collection_name: str
    :param collection_name: Name of the MongoDB collection.
    :type pipeline: List
    :param pipeline: A list of aggregation pipeline stages.
    :rtype: List of all documents produced by the pipeline, or a
        single-element list containing the exception on failure.
    """
    try:
        db = handle[database_name]
        # Materialize the cursor so callers get a plain list.
        return list(db[collection_name].aggregate(pipeline=pipeline))
    except Exception as e:
        # Repo convention: surface errors as data rather than raising.
        return [e]
================================================
FILE: Mongo/legos/mongodb_atlas_cluster_backup/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_atlas_cluster_backup/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_atlas_cluster_backup/mongodb_atlas_cluster_backup.json
================================================
{
"action_title": "MongoDB Atlas cluster cloud backup",
"action_description": "Trigger on-demand Atlas cloud backup",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_atlas_cluster_backup",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_CLUSTER"]
}
================================================
FILE: Mongo/legos/mongodb_atlas_cluster_backup/mongodb_atlas_cluster_backup.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
import pprint
from typing import Dict
import requests
from pydantic import BaseModel, Field
from requests.auth import HTTPDigestAuth
class InputSchema(BaseModel):
    """Input for mongodb_atlas_cluster_backup."""
    project_name: str = Field(
        title='Project Name',
        description='Atlas Project Name'
    )
    cluster_name: str = Field(
        title='Cluster Name',
        description='Atlas Cluster Name.'
    )
    description: str = Field(
        title='Description',
        description="Description of the on-demand snapshot."
    )
    # NOTE(review): this schema default is 7 but the action function's
    # default is 1 -- confirm which value is intended.
    retention_in_days: int = Field(
        default=7,
        title='Retention In Days',
        description=('Number of days that Atlas should retain the '
                     'on-demand snapshot. Must be at least 1.')
    )
def mongodb_atlas_cluster_backup_printer(output):
    """Pretty-print the snapshot dict returned by mongodb_atlas_cluster_backup."""
    if output is not None:
        print("\n\n")
        pprint.pprint(output)
def mongodb_atlas_cluster_backup(
        handle,
        project_name: str,
        cluster_name: str,
        description: str,
        retention_in_days: int = 1) -> Dict:
    """mongodb_atlas_cluster_backup triggers an on-demand Atlas cloud backup.

    Resolves the Atlas project (group) id from the project name, then POSTs
    an on-demand snapshot request for the given cluster.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type project_name: str
    :param project_name: Atlas Project Name.
    :type cluster_name: str
    :param cluster_name: Atlas Cluster Name.
    :type description: str
    :param description: Description of the on-demand snapshot.
    :type retention_in_days: int
    :param retention_in_days: Days Atlas retains the snapshot (minimum 1).
        NOTE(review): the InputSchema default is 7 while this default is 1
        -- confirm which is intended.
    :rtype: Dict of SnapShot, or a dict describing the failure.
    """
    atlas_base_url = handle.get_base_url()
    public_key = handle.get_public_key()
    private_key = handle.get_private_key()
    auth = HTTPDigestAuth(public_key, private_key)
    # Get Project ID from Project Name
    url = atlas_base_url + f"/groups/byName/{project_name}"
    try:
        # Explicit timeout so a stalled Atlas API cannot hang the action.
        resp = requests.get(url, auth=auth, timeout=60)
        resp.raise_for_status()
    except Exception as e:
        return {'Get project id failed': str(e)}
    project_resp = resp.json()
    group_id = project_resp.get("id")
    body = {
        "description": description,
        "retentionInDays": retention_in_days
    }
    url = atlas_base_url + (f"/groups/{group_id}/clusters/{cluster_name}/backup"
                            "/snapshots/?pretty=true")
    try:
        response = requests.post(url, auth=auth, json=body, timeout=60)
        response.raise_for_status()
    except Exception as e:
        return {'Start snapshot failed': str(e)}
    return response.json()
================================================
FILE: Mongo/legos/mongodb_check_large_index_size/README.md
================================================
## See it in Action
You can see this Action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_check_large_index_size/__init__.py
================================================
================================================
FILE: Mongo/legos/mongodb_check_large_index_size/mongodb_check_large_index_size.json
================================================
{
"action_title": "Get large MongoDB indices",
"action_description": "This action compares the size of each index with a given threshold and returns any indexes that exceed the threshold.",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_check_large_index_size",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Mongo/legos/mongodb_check_large_index_size/mongodb_check_large_index_size.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Tuple, Optional
from pydantic import BaseModel, Field
# Default index-size threshold, expressed in KB (~2GB).
DEFAULT_SIZE = 2048000
class InputSchema(BaseModel):
    """Inputs for the large-index-size check."""
    # Per-index size threshold, in KB; indexes larger than this are reported.
    # Description previously claimed the default was 512000KB, which did not
    # match DEFAULT_SIZE (2048000) — corrected to the actual default.
    index_threshold: Optional[float] = Field(
        DEFAULT_SIZE,
        description='The threshold for total index size. Default is 2048000KB.',
        title='Index threshold(in KB)',
    )
def mongodb_check_large_index_size_printer(output):
    """Print a human-readable summary of the index-size check result."""
    ok, alerts = output
    if not ok:
        # One alert line per offending index.
        for alert in alerts:
            print(f"Alert! Index size of {alert['indexSizeKB']} KB for database '{alert['db']}' in collection '{alert['collection']}' exceeds threshold !")
        return
    print("Index sizes are within the threshold.")
def mongodb_check_large_index_size(handle, threshold: float = DEFAULT_SIZE) -> Tuple:
    """
    Checks the index sizes for all databases (except 'local') and collections,
    and reports every index whose size exceeds the given threshold.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type threshold: float
    :param threshold: The threshold for index size in KB.
    :rtype: Tuple of (status, list of offending-index details or None).
    """
    # Details of every index that exceeded the threshold.
    alerts = []
    # 'local' holds internal/replication data, so it is skipped.
    database_names = [db for db in handle.list_database_names() if db != 'local']
    for db_name in database_names:
        db = handle[db_name]
        for coll_name in db.list_collection_names():
            coll = db.get_collection(coll_name)
            # Views have no storage of their own; skip them.
            if coll.options().get('viewOn'):
                continue
            stats = db.command("collstats", coll_name)
            # Check each index's size against the threshold.
            for index_name, index_size in stats['indexSizes'].items():
                # collstats sizes are divided by 1024 to get KB
                # (assumes the server reports bytes).
                index_size_kb = index_size / 1024
                if index_size_kb > threshold:
                    alerts.append({
                        'db': db_name,
                        'collection': coll_name,
                        'index': index_name,
                        'indexSizeKB': index_size_kb
                    })
    # Note: the original wrapped this body in `try/except: raise e`,
    # which only re-raised — removed as a no-op.
    if len(alerts) != 0:
        return (False, alerts)
    return (True, None)
================================================
FILE: Mongo/legos/mongodb_compare_disk_size_to_threshold/README.md
================================================
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_compare_disk_size_to_threshold/__init__.py
================================================
================================================
FILE: Mongo/legos/mongodb_compare_disk_size_to_threshold/mongodb_compare_disk_size_to_threshold.json
================================================
{
"action_title": "Get MongoDB large databases",
"action_description": "This action compares the total disk size used by MongoDB to a given threshold.",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_compare_disk_size_to_threshold",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Mongo/legos/mongodb_compare_disk_size_to_threshold/mongodb_compare_disk_size_to_threshold.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Optional, Tuple
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the disk-size-vs-threshold check."""
    # Per-database disk-size threshold, in KB.
    threshold: Optional[float] = Field(
        83886080 , # 80GB in KB
        description='Threshold for disk size in KB.',
        title='Threshold (in KB)'
    )
def mongodb_compare_disk_size_to_threshold_printer(output):
    """Report databases whose disk usage exceeded the threshold."""
    success, alerts = output
    if not success:
        for alert in alerts:
            print(f"Alert! Disk size of {alert['totalDiskSize']} KB for database {alert['db']} exceeds threshold of {alert['threshold']} KB.")
    else:
        print("Disk sizes are within the threshold.")
def mongodb_compare_disk_size_to_threshold(handle, threshold: float = 83886080) -> Tuple:
    """
    Compares the disk size used by each MongoDB database (data + indexes)
    to a given threshold and reports the databases that exceed it.

    :type handle: object
    :param handle: Object returned from Task Validate.
    :type threshold: float
    :param threshold: The threshold for disk size in KB.
    :return: Tuple of (status, list of alerts or None).
    """
    # Databases whose footprint exceeds the threshold.
    # (The original also initialized a dead `total_disk_size = 0`
    # accumulator that was overwritten on every iteration — removed.)
    result = []
    for db_name in handle.list_database_names():
        db = handle[db_name]
        stats = db.command("dbStats")
        # dataSize + indexSize is the database's total footprint; divide by
        # 1024 to get KB (assumes dbStats reports bytes).
        total_disk_size = (stats['dataSize'] + stats['indexSize']) / 1024
        if total_disk_size > threshold:
            result.append({'db': db_name, 'totalDiskSize': total_disk_size, 'threshold': threshold})
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: Mongo/legos/mongodb_count_documents/README.md
================================================
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_count_documents/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_count_documents/mongodb_count_documents.json
================================================
{
"action_title": "MongoDB Count Documents",
"action_description": "MongoDB Count Documents",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_count_documents",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_COLLECTION","CATEGORY_TYPE_MONGODB_DOCUMENT"]
}
================================================
FILE: Mongo/legos/mongodb_count_documents/mongodb_count_documents.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for counting documents in a MongoDB collection."""
    # Database holding the collection to count in.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database.'
    )
    # Collection whose documents are counted.
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection.'
    )
    # Mongo filter document; an empty document counts everything.
    filter: dict = Field(
        title='Filter Query',
        description='''
        A query document that selects which documents to count in the collection.
        Can be an empty document to count all documents.
        For eg: {"foo":"bar"}.
        '''
    )
def mongodb_count_documents_printer(output):
    """Show the document count, or the raw output (e.g. an error) otherwise."""
    if output is None:
        return
    if not isinstance(output, int):
        pprint.pprint(output)
        return
    pprint.pprint(f"Total number of documents : {output}")
def mongodb_count_documents(
    handle,
    database_name: str,
    collection_name: str,
    filter: dict
):
    """Count the documents in a collection that match a filter, using
    count_documents().

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :type collection_name: str
    :param collection_name: Name of the MongoDB collection.
    :type filter: Dict
    :param filter: A query that matches the documents to count.
    :rtype: int count on success; the raised Exception object on failure.
    """
    try:
        collection = handle[database_name][collection_name]
        return collection.count_documents(filter)
    except Exception as e:
        return e
================================================
FILE: Mongo/legos/mongodb_create_collection/README.md
================================================
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_create_collection/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_create_collection/mongodb_create_collection.json
================================================
{
"action_title": "MongoDB Create Collection",
"action_description": "MongoDB Create Collection",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_create_collection",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_COLLECTION"]
}
================================================
FILE: Mongo/legos/mongodb_create_collection/mongodb_create_collection.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for creating a MongoDB collection."""
    # Database in which the collection is created.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database'
    )
    # Name of the collection to create.
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection'
    )
def mongodb_create_collection_printer(output):
    """Display the collection list after creation, or the error raised."""
    head = output[0]
    if head is None:
        return
    print("\n\n")
    if isinstance(head, Exception):
        pprint.pprint(f"Error : {head}")
        return
    pprint.pprint("List of all collections after creating new one")
    pprint.pprint(head)
    # output[1] holds the name that was requested; confirm it exists now.
    if output[1] in head:
        pprint.pprint("Collection created successfully !!!")
def mongodb_create_collection(handle, database_name: str, collection_name: str) -> List:
    """Create a collection in a MongoDB database.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :type collection_name: str
    :param collection_name: Name of the MongoDB collection.
    :rtype: [collection_list, collection_name] on success, [exception] on failure.
    """
    try:
        db = handle[database_name]
        db.create_collection(collection_name)
        # Re-list so the caller can verify the new collection is present.
        return [db.list_collection_names(), collection_name]
    except Exception as e:
        return [e]
================================================
FILE: Mongo/legos/mongodb_create_database/README.md
================================================
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_create_database/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_create_database/mongodb_create_database.json
================================================
{
"action_title": "MongoDB Create Database",
"action_description": "MongoDB Create Database",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_create_database",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB"]
}
================================================
FILE: Mongo/legos/mongodb_create_database/mongodb_create_database.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for creating a MongoDB database."""
    # Name of the database to create.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database'
    )
    # Initial collection to create inside the new database.
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection'
    )
def mongodb_create_database_printer(output):
    """Display the database list after creation, or the error raised."""
    head = output[0]
    if head is None:
        return
    print("\n\n")
    if isinstance(head, Exception):
        pprint.pprint(f"Error : {head}")
        return
    pprint.pprint("List of databases after creating new one")
    pprint.pprint(head)
    # output[1] is the newly-created database's name; confirm it is listed.
    if output[1] in head:
        pprint.pprint("Database created successfully !!!")
def mongodb_create_database(handle, database_name: str, collection_name: str) -> List:
    """Create a database in MongoDB by creating an initial collection in it.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :type collection_name: str
    :param collection_name: Name of the MongoDB collection.
    :rtype: [database_list, database_name] on success, [exception] on failure.
    """
    try:
        # Creating a collection makes the new database visible.
        handle[database_name].create_collection(collection_name)
        # Re-list databases so the caller can verify creation.
        return [handle.list_database_names(), database_name]
    except Exception as e:
        return [e]
================================================
FILE: Mongo/legos/mongodb_delete_collection/README.md
================================================
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_delete_collection/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_delete_collection/mongodb_delete_collection.json
================================================
{
"action_title": "Delete collection from MongoDB database",
"action_description": "Delete collection from MongoDB database",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_delete_collection",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_COLLECTION"]
}
================================================
FILE: Mongo/legos/mongodb_delete_collection/mongodb_delete_collection.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for deleting a collection from a MongoDB database."""
    # Database holding the collection to drop.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database'
    )
    # Collection to drop.
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection'
    )
def mongodb_delete_collection_printer(output):
    """Summarize collection counts before/after the drop; echo errors back."""
    if output is None:
        return None
    print("\n\n")
    if isinstance(output, Exception):
        pprint.pprint(f"Error : {output}")
        return output
    before, after = output[0], output[1]
    pprint.pprint(f"Collection count BEFORE drop:{len(before)}")
    pprint.pprint(f"Collection count AFTER drop:{len(after)}")
    # A changed count means the drop actually removed something.
    if len(before) != len(after):
        pprint.pprint("Collection deleted successfully !!!")
    return None
def mongodb_delete_collection(handle, database_name: str, collection_name: str) -> List:
    """Delete a collection from a MongoDB database.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :type collection_name: str
    :param collection_name: Name of the MongoDB collection.
    :rtype: [collections_before_drop, collections_after_drop] on success,
        [exception] on failure.
    """
    try:
        db = handle[database_name]
        before = db.list_collection_names()
        db.drop_collection(collection_name)
        # Re-list so the caller can verify the drop took effect.
        after = db.list_collection_names()
        return [before, after]
    except Exception as e:
        return [e]
================================================
FILE: Mongo/legos/mongodb_delete_database/README.md
================================================
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_delete_database/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_delete_database/mongodb_delete_database.json
================================================
{
"action_title": "MongoDB Delete Database",
"action_description": "MongoDB Delete Database",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_delete_database",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB"]
}
================================================
FILE: Mongo/legos/mongodb_delete_database/mongodb_delete_database.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for deleting a MongoDB database."""
    # Name of the database to drop.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database'
    )
def mongodb_delete_database_printer(output):
    """Summarize database counts before/after the drop; echo errors back."""
    if output is None:
        return None
    print("\n\n")
    if isinstance(output, Exception):
        pprint.pprint(f"Error : {output}")
        return output
    before, after = output[0], output[1]
    pprint.pprint(f"db count BEFORE drop:{len(before)}")
    pprint.pprint(f"db count AFTER drop:{len(after)}")
    # A changed count means the drop actually removed the database.
    if len(before) != len(after):
        pprint.pprint("Database deleted successfully !!!")
    return None
def mongodb_delete_database(handle, database_name: str) -> List:
    """Delete a database in MongoDB.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :rtype: [db_names_before_drop, db_names_after_drop] on success,
        [exception] on failure.
    """
    try:
        before = handle.list_database_names()
        handle.drop_database(database_name)
        # Re-list so the caller can verify the drop took effect.
        after = handle.list_database_names()
        return [before, after]
    except Exception as e:
        return [e]
================================================
FILE: Mongo/legos/mongodb_delete_document/README.md
================================================
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_delete_document/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_delete_document/mongodb_delete_document.json
================================================
{
"action_title": "MongoDB Delete Document",
"action_description": "MongoDB Delete Document",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_delete_document",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_INT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_COLLECTION","CATEGORY_TYPE_MONGODB_DOCUMENT"]
}
================================================
FILE: Mongo/legos/mongodb_delete_document/mongodb_delete_document.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from unskript.enums.mongo_enums import DeleteCommands
from pymongo.errors import AutoReconnect, ServerSelectionTimeoutError
class InputSchema(BaseModel):
    """Inputs for the MongoDB delete-document action."""
    # Database containing the target collection.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database.'
    )
    # Collection to delete documents from.
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection.'
    )
    # Which delete variant to run; defaults to delete_one.
    command: DeleteCommands = Field(
        DeleteCommands.delete_one,
        title='Command Name',
        description='''
        Name of command
        for Eg. delete_one, delete_many
        Supported commands : delete_one and delete_many
        '''
    )
    # Query document selecting the document(s) to delete.
    filter: dict = Field(
        title='Filter Query',
        description='A query that matches the document to delete For eg: {"foo":"bar"}.'
    )
def mongodb_delete_document_printer(output):
    """Print how many documents the delete operation removed.

    :param output: int deleted-count returned by mongodb_delete_document.
    """
    print("\n")
    if output == 0:
        print("No Documents were deleted")
    elif output > 1:
        # BUG FIX: output is already a plain int (the deleted count); the
        # previous code accessed `output.deleted_count`, which raised
        # AttributeError whenever more than one document was deleted.
        print(f"{output} Documents Deleted")
    else:
        print("Document Deleted")
def mongodb_delete_document(
        handle,
        database_name: str,
        collection_name: str,
        command: DeleteCommands,
        filter: dict
) -> int:
    """mongodb_delete_document Runs mongo delete command with the provided parameters.
    :type handle: object
    :param handle: Handle returned from the Task validate command
    :type database_name: str
    :param database_name: Name of the MongoDB database
    :type collection_name: str
    :param collection_name: Name of the Collection to the delete the document from
    :type command: DeleteCommands
    :param command: Enum for DeleteCommand Options are delete_one or delete_many
    :type filter: dict
    :param filter: Search Filter to perform the delete operation on
    :rtype: Count of deleted documents, or None for an unsupported command
    """
    # Make sure the handle is not stale and can connect to the MongoDB server.
    try:
        handle.server_info()
    except (AutoReconnect, ServerSelectionTimeoutError) as e:
        print("[UNSKRIPT]: Reconnection / Server Selection Timeout Error: ", str(e))
        raise e
    except Exception as e:
        print("[UNSKRIPT]: Error Connecting: ", str(e))
        raise e
    # The original wrapped the delete calls in `try/except: raise e`,
    # which only re-raised — removed as a no-op.
    db = handle[database_name]
    if command == DeleteCommands.delete_one:
        return db[collection_name].delete_one(filter).deleted_count
    if command == DeleteCommands.delete_many:
        return db[collection_name].delete_many(filter).deleted_count
    # Unsupported command value: nothing was deleted.
    return None
================================================
FILE: Mongo/legos/mongodb_distinct_command/README.md
================================================
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_distinct_command/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_distinct_command/mongodb_distinct_command.json
================================================
{
"action_title": "MongoDB Distinct Command",
"action_description": "MongoDB Distinct Command",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_distinct_command",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB"]
}
================================================
FILE: Mongo/legos/mongodb_distinct_command/mongodb_distinct_command.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for retrieving distinct field values from a MongoDB collection."""
    # Database containing the target collection.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database'
    )
    # Collection to run the distinct command against.
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection'
    )
    # Field whose distinct values are returned.
    key: str = Field(
        title='Name of field',
        description='''
        Name of the field for which we want to get the distinct values
        '''
    )
    # Optional query restricting which documents are considered.
    filter: dict = Field(
        None,
        title='Filter Query',
        description='''
        A query document that specifies the documents from which to retrieve the distinct values.
        For eg: {"foo":"bar"}.
        '''
    )
def mongodb_distinct_command_printer(output):
    """Pretty-print each distinct value, or the raw output if not a list."""
    if output is None:
        return
    print("\n\n")
    if not isinstance(output, List):
        pprint.pprint(output)
        return
    for entry in output:
        pprint.pprint(entry)
def mongodb_distinct_command(
        handle,
        database_name: str,
        collection_name: str,
        key: str,
        filter=None
) -> List:
    """Return the distinct values of a field in a MongoDB collection.

    (The previous docstring was copy-pasted from the count-documents action
    and described the wrong operation.)

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :type collection_name: str
    :param collection_name: Name of the MongoDB collection.
    :type key: str
    :param key: Name of the field for which we want to get the distinct values.
    :type filter: Dict
    :param filter: A query restricting which documents are considered.
    :rtype: List of distinct values, or [exception] on failure.
    """
    # Default mutable argument is avoided by normalizing None to {} here.
    if filter is None:
        filter = {}
    try:
        db = handle[database_name]
        # distinct() already yields the values; materialize into a list
        # instead of the original manual append loop.
        return list(db[collection_name].distinct(key, filter))
    except Exception as e:
        return [e]
================================================
FILE: Mongo/legos/mongodb_find_document/README.md
================================================
## See it in Action
You can see this Lego in action by following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_find_document/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_find_document/mongodb_find_document.json
================================================
{
"action_title": "MongoDB Find Document",
"action_description": "MongoDB Find Document",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_find_document",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_COLLECTION","CATEGORY_TYPE_MONGODB_DOCUMENT"]
}
================================================
FILE: Mongo/legos/mongodb_find_document/mongodb_find_document.py
================================================
##
# Copyright (c) 2022 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List, Optional
from pydantic import BaseModel, Field
from unskript.enums.mongo_enums import FindCommands
from pymongo import ReturnDocument
from pymongo.errors import AutoReconnect, ServerSelectionTimeoutError
class InputSchema(BaseModel):
    """Inputs for the MongoDB find-document action."""
    # Database containing the target collection.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database.'
    )
    # Collection to run the find command against.
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection.'
    )
    # Which find variant to execute; defaults to a plain find().
    command: Optional[FindCommands] = Field(
        default=FindCommands.find,
        title='Command',
        description='''
        Name of command
        for Eg. find, etc
        Supported commands : find, find_one_and_delete, find_one_and_replace, find_one_and_update
        '''
    )
    # Query document selecting the document(s) to operate on.
    filter: dict = Field(
        title='Filter',
        description=('A query that matches the document to update, delete, find '
                     'and replace. For eg: {"foo":"bar"}.')
    )
    # Replacement/update specification; unused by the plain find variants.
    document: Optional[dict] = Field(
        default=None,
        title='Update/Replace Document',
        description='''
        The modifications to apply in dictionary format.
        For eg: For update : {"$set":{"field":"value"}} to Replace : {"field":"value"}
        Not applicable for find, find_one and find_one_and_delete
        '''
    )
    # Field-inclusion/exclusion mapping (or list) applied to results.
    projection: Optional[dict] = Field(
        default=None,
        title='Projection ',
        description='''
        A list of field names that should be
        returned in the result document or a mapping specifying the fields
        to include or exclude. If `projection` is a list "_id" will
        always be returned. Use a mapping to exclude fields from
        the result (e.g. {'_id': False})
        ''')
    # Sort order as a list of {key: direction} pairs.
    sort: Optional[list] = Field(
        default=None,
        title='Sort',
        description='''
        a list of {key:direction} pairs
        specifying the sort order for the query. If multiple documents
        match the query, they are sorted and the first is updated.
        (e.g. [{'age': '-1'}])
        '''
    )
def mongodb_find_document_printer(output):
    """Pretty-print each matched document; note when nothing matched."""
    if not isinstance(output, List):
        return
    if not output:
        print("No Matching Documents.")
        return
    for entry in output:
        pprint.pprint(entry)
def mongodb_find_document(
        handle,
        database_name: str,
        collection_name: str,
        filter: dict,
        command: FindCommands = FindCommands.find,
        document: dict = None,
        projection: dict = None,
        sort: List = None) -> List:
    """mongodb_find_document Runs mongo find commands with the provided parameters.

    :type handle: object
    :param handle: Object returned from Task validate method
    :type database_name: str
    :param database_name: Name of the MongoDB database
    :type collection_name: str
    :param collection_name: Name of the MongoDB Collection to work on
    :type filter: dict
    :param filter: Filter in the dictionary form to work with
    :type command: FindCommands
    :param command: FindCommands Enum selecting which find variant to run
    :type document: dict
    :param document: Update/replacement document (only used by the
        find_one_and_replace / find_one_and_update commands)
    :type projection: dict
    :param projection: Projection in Dictionary form
    :type sort: List
    :param sort: Sort order as a list of {key: direction} dicts
    :rtype: List with all the results of the query.
    """
    # Lets make sure the handle that is returned is not stale
    # and can connect to the MongoDB server
    try:
        handle.server_info()
    except (AutoReconnect, ServerSelectionTimeoutError) as e:
        print(f"[UNSKRIPT]: Reconnection / Server Selection Timeout Error: {str(e)}")
        raise e
    except Exception as e:
        print(f"[UNSKRIPT]: Error Connecting: {str(e)}")
        raise e

    # pymongo expects sort as a list of (key, direction) tuples, while the
    # input arrives as a list of {key: direction} dicts — convert it here.
    sort_by = sort
    update = document
    sort = []
    if sort_by:
        for val in sort_by:
            for k, v in val.items():
                sort.append((k, v))

    result = []
    try:
        db = handle[database_name]
        if command == FindCommands.find:
            records = db[collection_name].find(
                filter, projection=projection, sort=sort)
            for record in records:
                result.append(record)
        elif command == FindCommands.find_one:
            record = db[collection_name].find_one(
                filter, projection=projection, sort=sort)
            result.append(record)
        elif command == FindCommands.find_one_and_delete:
            record = db[collection_name].find_one_and_delete(
                filter, projection=projection, sort=sort)
            pprint.pprint("One matching document deleted")
            result.append(record)
        elif command == FindCommands.find_one_and_replace:
            # ReturnDocument.AFTER makes the call return the post-replace document.
            record = db[collection_name].find_one_and_replace(
                filter, replacement=update, projection=projection,
                sort=sort, return_document=ReturnDocument.AFTER)
            # FIX: message previously read "docuemnt" (typo).
            pprint.pprint("One matching document replaced")
            result.append(record)
        elif command == FindCommands.find_one_and_update:
            record = db[collection_name].find_one_and_update(
                filter,
                update=update,
                projection=projection,
                sort=sort,
                return_document=ReturnDocument.AFTER
            )
            pprint.pprint("Document Updated")
            result.append(record)
        return result
    except Exception as e:
        raise e
================================================
FILE: Mongo/legos/mongodb_find_one/README.md
================================================
# MongoDB Find One
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_find_one/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_find_one/mongodb_find_one.json
================================================
{
"action_title": "MongoDB Find One",
"action_description": "MongoDB Find One returns a single entry that matches the query.",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_find_one",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_COLLECTION","CATEGORY_TYPE_MONGODB_DOCUMENT"]
}
================================================
FILE: Mongo/legos/mongodb_find_one/mongodb_find_one.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List, Optional
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the MongoDB find-one action (mongodb_find_one)."""
    # Target database to query.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database.'
    )
    # Target collection within the database.
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection.'
    )
    # Query used to select the document.
    filter: dict = Field(
        title='Filter Query',
        description='A query that matches the document to find. For eg: { "name": "mike" }.'
    )
    # Fields to include or exclude in the returned document.
    projection: Optional[dict] = Field(
        default=None,
        title='Projection',
        description='''
            A list of field names that should be
            returned in the result document or a mapping specifying the fields
            to include or exclude. If `projection` is a list "_id" will
            always be returned. Use a mapping to exclude fields from
            the result (e.g. {'_id': false})
            ''')
    # Sort order given as a list of {key: direction} dicts.
    sort: Optional[list] = Field(
        default=None,
        title='Sort',
        description='''
            a list of {key:direction} pairs
            specifying the sort order for the query. If multiple documents
            match the query, they are sorted and the first is updated.
            (e.g. [{'age': '-1'}])
            '''
    )
def mongodb_find_one_printer(func):
    """Decorator: pretty-print the wrapped function's result, then return it."""
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        print("\n\n")
        pprint.pprint(result)
        return result
    return wrapper
@mongodb_find_one_printer
def mongodb_find_one(
        handle,
        database_name: str,
        collection_name: str,
        filter: dict,
        projection: dict = None,
        sort: List = None) -> dict:
    """mongodb_find_one returns a single document matching the filter.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :type collection_name: str
    :param collection_name: Name of the MongoDB collection.
    :type filter: Dict
    :param filter: A query that matches the document to find.
    :type projection: Dict
    :param projection: A list of field names that should be returned/excluded in the result.
    :type sort: list
    :param sort: A list of {key:direction} pairs.
    :rtype: Dict of matched query result ({} when nothing matches,
        {"error": ...} when the lookup raises).
    """
    try:
        collection = handle[database_name][collection_name]
        found = collection.find_one(
            filter, projection=projection, sort=sort if sort else None)
    except Exception as e:
        return {"error" : str(e)}
    return found if found else {}
================================================
FILE: Mongo/legos/mongodb_get_handle/README.md
================================================
# Get MongoDB Handle
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_get_handle/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_get_handle/mongodb_get_handle.json
================================================
{
"action_title": "Get MongoDB Handle",
"action_description": "Get MongoDB Handle",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false
}
================================================
FILE: Mongo/legos/mongodb_get_handle/mongodb_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No inputs: the action only needs the credential-backed handle.
    pass
def mongodb_get_handle(handle):
    """mongodb_get_handle hands back the MongoDB client unchanged.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :rtype: The same MongoDB client object.
    """
    return handle
================================================
FILE: Mongo/legos/mongodb_get_metrics/README.md
================================================
# MongoDB Get Metrics
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_get_metrics/__init__.py
================================================
================================================
FILE: Mongo/legos/mongodb_get_metrics/mongodb_get_metrics.json
================================================
{
"action_title": "MongoDB get metrics",
"action_description": "This action retrieves various metrics such as index size, disk size per collection for all databases and collections.",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_get_metrics",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_INFORMATION" , "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB"]
}
================================================
FILE: Mongo/legos/mongodb_get_metrics/mongodb_get_metrics.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Tuple
from pydantic import BaseModel
from tabulate import tabulate
class InputSchema(BaseModel):
    # No inputs: metrics are collected for every database and collection.
    pass
def mongodb_get_metrics_printer(output):
    """Print total server memory followed by a per-collection size table."""
    if not output:
        return
    total_memory, index_outputs = output
    if total_memory:
        mem_mb = total_memory[0].get('Memory (MB)')
        print(f"Total Memory: {mem_mb} MB")
    print(tabulate(index_outputs, headers="keys"))
def mongodb_get_metrics(handle) -> Tuple:
    """
    mongodb_get_metrics retrieves various metrics such as index size,
    disk size per collection for all databases and collections.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :rtype: Tuple of (database_metrics, index_metrics) — total resident
        memory in MB plus per-collection index/storage sizes in KB.
    """
    # FIX: removed the redundant try/except that only re-raised the same
    # exception; errors now propagate directly to the caller.
    index_metrics = []
    database_metrics = []
    database_names = handle.list_database_names()
    server_status = handle.admin.command("serverStatus")
    # 'mem.resident' is the server's resident set size, reported in MB.
    total_memory_MB = server_status['mem']['resident']
    database_metrics.append({
        'Database': 'ALL',
        'Collection': 'ALL',
        'Memory (MB)': total_memory_MB,
    })
    for db_name in database_names:
        db = handle[db_name]
        # Skip views ('viewOn' set): they have no physical storage of their own.
        collection_names = [coll['name'] for coll in db.list_collections() if not coll['options'].get('viewOn')]
        for coll_name in collection_names:
            stats = db.command("collstats", coll_name)
            index_size_KB = sum(stats.get('indexSizes', {}).values()) / 1024  # bytes -> KB
            storage_size_KB = stats.get('storageSize', 0) / 1024  # bytes -> KB
            index_metrics.append({
                'Database': db_name,
                'Collection': coll_name,
                'Index Size (KB)': index_size_KB,
                'Storage Size (KB)': storage_size_KB,
            })
    return database_metrics, index_metrics
================================================
FILE: Mongo/legos/mongodb_get_replica_set/README.md
================================================
# Get MongoDB Replica Set
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_get_replica_set/__init__.py
================================================
================================================
FILE: Mongo/legos/mongodb_get_replica_set/mongodb_get_replica_set.json
================================================
{
"action_title": "Get MongoDB replica set",
"action_description": "This action retrieves the primary replica and a list of secondary replicas from a MongoDB replica set.",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_get_replica_set",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_INFORMATION" ]
}
================================================
FILE: Mongo/legos/mongodb_get_replica_set/mongodb_get_replica_set.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import List
from pydantic import BaseModel
from tabulate import tabulate
class InputSchema(BaseModel):
    # No inputs: replica set status is read directly from the handle.
    pass
def mongodb_get_replica_set_printer(output):
    """Render the replica list as a two-column grid (name, role)."""
    if output is None:
        print("No data found")
        return
    rows = [(member['name'], member['role']) for member in output]
    print(tabulate(rows, headers=["Replica Name", "Role"], tablefmt='grid'))
def mongodb_get_replica_set(handle) -> List:
    """
    mongodb_get_replica_set retrieves the primary replica and a list of secondary
    replicas from a MongoDB replica set.

    :type handle: object
    :param handle: Object of type unskript connector to connect to MongoDB client
    :return: A list of dictionaries where each dictionary contains the name of
        the replica and its role (the member's stateStr, e.g. PRIMARY/SECONDARY).
    """
    status = handle.admin.command("replSetGetStatus")
    return [
        {'name': member['name'], 'role': member['stateStr']}
        for member in status['members']
    ]
================================================
FILE: Mongo/legos/mongodb_get_server_status/README.md
================================================
# Get Mongo Server Status
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_get_server_status/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_get_server_status/mongodb_get_server_status.json
================================================
{
"action_title": "Get Mongo Server Status",
"action_description": "Status indicating server reachability of mongo server",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_get_server_status",
"action_needs_credential": true,
"action_supports_poll": true,
"action_is_check": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Mongo/legos/mongodb_get_server_status/mongodb_get_server_status.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    # No inputs: the reachability check only needs the handle.
    pass
def mongodb_get_server_status_printer(output):
    """Print reachability (output[0]) and any error detail (output[1])."""
    status_text = "Reachable" if output[0] else "Unreachable"
    print(f"MongoDB Server Status: {status_text}")
    if output[1]:
        print(f"Error: {output[1]}")
def mongodb_get_server_status(handle) -> Tuple:
    """Returns the status of the MongoDB instance.

    :type handle: object
    :param handle: MongoDB connection object
    :return: Tuple of (reachable: bool, error: Optional[str]) indicating
        server reachability of the mongo server.
    """
    try:
        # Check server reachability; "ping" is the cheapest round-trip and
        # answers {"ok": 1} when the server is up.
        result = handle.admin.command("ping")
        if result and result.get("ok"):
            return (True, None)
    except Exception as e:
        return (False, str(e))
    # FIX: previously returned a dict here while the exception path returns a
    # string; normalize to a string so output[1] always has one type.
    return (False, "Unable to check Mongo server status")
================================================
FILE: Mongo/legos/mongodb_get_write_conflicts/README.md
================================================
# Get MongoDB Potential Write Conflicts
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_get_write_conflicts/__init__.py
================================================
================================================
FILE: Mongo/legos/mongodb_get_write_conflicts/mongodb_get_write_conflicts.json
================================================
{
"action_title": "Get MongoDB potential write conflicts",
"action_description": "This action retrieves potential write conflict metrics from the serverStatus command.",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_get_write_conflicts",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": ["CATEGORY_TYPE_INFORMATION" ]
}
================================================
FILE: Mongo/legos/mongodb_get_write_conflicts/mongodb_get_write_conflicts.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from typing import Dict
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No inputs: the metric is read from serverStatus via the handle.
    pass
def mongodb_get_write_conflicts_printer(output):
    """Print the potential write conflict count from the output dict."""
    if output is None:
        return
    conflicts = output.get("totalWriteConflicts", "N/A")
    print("Potential Write Conflicts:", conflicts)
def mongodb_get_write_conflicts(handle) -> Dict:
    """
    mongodb_get_write_conflicts Retrieves potential write conflict metrics from
    the serverStatus command.

    NOTE(review): the value reported is wiredTiger.concurrentTransactions.write.out,
    i.e. the count of write tickets in use — used here as a proxy for potential
    write conflicts; confirm this is the intended metric.

    :type handle: object
    :param handle: Object of type unskript connector to connect to MongoDB client
    :return: A dictionary containing metrics related to potential write conflicts.
    """
    server_status = handle.admin.command("serverStatus")
    wired_tiger = server_status.get("wiredTiger", {})
    write_tickets = wired_tiger.get("concurrentTransactions", {}).get("write", {})
    return {"totalWriteConflicts": write_tickets.get("out", 0)}
================================================
FILE: Mongo/legos/mongodb_insert_document/README.md
================================================
# MongoDB Insert Document
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_insert_document/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_insert_document/mongodb_insert_document.json
================================================
{
"action_title": "MongoDB Insert Document",
"action_description": "MongoDB Insert Document",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_insert_document",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_COLLECTION","CATEGORY_TYPE_MONGODB_DOCUMENT"]
}
================================================
FILE: Mongo/legos/mongodb_insert_document/mongodb_insert_document.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
import pymongo
class InputSchema(BaseModel):
    """Inputs for the MongoDB insert-document action (mongodb_insert_document)."""
    # Target database for the insert.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database.'
    )
    # Target collection within the database.
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection.'
    )
    # Documents inserted via insert_many().
    # FIX: example text was garbled ("For eg. Fo [ ... ]").
    documents: list = Field(
        title='Documents',
        description='''
            An array of documents to insert into the collection.
            For eg. [ {"foo": "bar"} ... ]
            '''
    )
def mongodb_insert_document_printer(output):
    """Print the inserted document IDs, or a notice when nothing was inserted."""
    if output is None:
        return
    if isinstance(output, List) and not output:
        print("No Documents Inserted.")
        return
    print(f"Inserted {len(output)} Documents with IDs: ")
    for inserted_id in output:
        pprint.pprint(inserted_id)
def mongodb_insert_document(
        handle,
        database_name: str,
        collection_name: str,
        documents: list
        ) -> List:
    """mongodb_insert_document Runs mongo insert commands with the provided parameters.

    Inserts all of `documents` into the collection via insert_many and returns
    the generated document IDs.

    :type handle: object
    :param handle: Object returned from the Task Validate method
    :type database_name: str
    :param database_name: Name of the MongoDB database
    :type collection_name: str
    :param collection_name: Collection name in the MongoDB database
    :type documents: list
    :param documents: Documents to be inserted in the MongoDB collection
    :rtype: List containing Insert IDs
    """
    # Lets make sure the handle that is returned is not stale
    # and can connect to the MongoDB server
    try:
        handle.server_info()
    except (pymongo.errors.AutoReconnect, pymongo.errors.ServerSelectionTimeoutError) as e:
        print("[UNSKRIPT]: Reconnection / Server Selection Timeout Error: ", str(e))
        raise e
    except Exception as e:
        print("[UNSKRIPT]: Error Connecting: ", str(e))
        raise e
    try:
        db = handle[database_name]
        # insert_many returns a result object carrying the generated _ids.
        res = db[collection_name].insert_many(documents)
        return res.inserted_ids
    except Exception as e:
        print("[UNSKRIPT]: Error while Inserting Document(s): ", str(e))
        raise e
================================================
FILE: Mongo/legos/mongodb_kill_queries/README.md
================================================
# MongoDB Kill Queries
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_kill_queries/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_kill_queries/mongodb_kill_queries.json
================================================
{
"action_title": "MongoDB kill queries",
"action_description": "MongoDB kill queries",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_kill_queries",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB","CATEGORY_TYPE_MONGODB_QUERY"]
}
================================================
FILE: Mongo/legos/mongodb_kill_queries/mongodb_kill_queries.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the MongoDB kill-queries action (mongodb_kill_queries)."""
    # Operation ID (as reported by currentOp) to terminate via killOp.
    op_id: int = Field(
        title='An operation ID',
        description='Kill the operation based on opid'
    )
def mongodb_kill_queries_printer(output):
    """Pretty-print the killOp command response, if present."""
    if output is not None:
        print("\n\n")
        pprint.pprint(output)
def mongodb_kill_queries(handle, op_id: int) -> Dict:
    """mongodb_kill_queries can kill queries (read operations) that
    are running on more than one shard in a cluster.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type op_id: int
    :param op_id: ID of the operation to kill (as shown by currentOp).
    :rtype: Dict with the killOp response, or {"Error": <exception>} on failure.
    """
    try:
        return handle.admin.command("killOp", op=op_id)
    except Exception as e:
        return {"Error": e}
================================================
FILE: Mongo/legos/mongodb_list_collections/README.md
================================================
# Get List of Collections in MongoDB Database
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_list_collections/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_list_collections/mongodb_list_collections.json
================================================
{
"action_title": "Get list of collections in MongoDB Database",
"action_description": "Get list of collections in MongoDB Database",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_list_collections",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_COLLECTION"]
}
================================================
FILE: Mongo/legos/mongodb_list_collections/mongodb_list_collections.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from pymongo.errors import InvalidName
class InputSchema(BaseModel):
    """Inputs for the MongoDB list-collections action (mongodb_list_collections)."""
    # Database whose collections will be listed.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database'
    )
def mongodb_list_collections_printer(output):
    """Print the collection list, or the error message if the lookup failed."""
    if output is None:
        return
    print("\n\n")
    if isinstance(output, Exception):
        # pymongo exceptions carry the message in _message.
        pprint.pprint(output._message)
        return
    pprint.pprint("List of collections in DB")
    pprint.pprint(output)
def mongodb_list_collections(handle, database_name: str) -> List:
    """mongodb_list_collections Returns list of all collection in MongoDB

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :rtype: List of collection names, or a single-element list holding the
        InvalidName exception when the database name is invalid.
    """
    try:
        database = handle[database_name]
        return database.list_collection_names()
    except InvalidName as e:
        return [e]
================================================
FILE: Mongo/legos/mongodb_list_databases/README.md
================================================
# Get List of MongoDB Databases
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_list_databases/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_list_databases/mongodb_list_databases.json
================================================
{
"action_title": "Get list of MongoDB Databases",
"action_description": "Get list of MongoDB Databases",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_list_databases",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB"]
}
================================================
FILE: Mongo/legos/mongodb_list_databases/mongodb_list_databases.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel
from pymongo.errors import AutoReconnect, ServerSelectionTimeoutError
class InputSchema(BaseModel):
    # No inputs: database names are listed directly from the handle.
    pass
def mongodb_list_databases_printer(output):
    """Pretty-print the list of database names, if present."""
    if output is not None:
        print("\n\n")
        pprint.pprint("List of databases")
        pprint.pprint(output)
def mongodb_list_databases(handle) -> List:
    """mongodb_list_databases Returns list of all databases in MongoDB

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :rtype: List All the databases in mongodb.
    """
    # Fail fast if the client handle is stale / cannot reach the server.
    try:
        handle.server_info()
    except (AutoReconnect, ServerSelectionTimeoutError) as e:
        print("[UNSKRIPT]: Reconnection / Server Selection Timeout Error: ", str(e))
        raise e
    except Exception as e:
        print("[UNSKRIPT]: Error Connecting: ", str(e))
        raise e
    return handle.list_database_names()
================================================
FILE: Mongo/legos/mongodb_list_queries/README.md
================================================
# MongoDB List Queries
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_list_queries/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_list_queries/mongodb_list_queries.json
================================================
{
"action_title": "MongoDB list queries",
"action_description": "MongoDB list queries",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_list_queries",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_QUERY"]
}
================================================
FILE: Mongo/legos/mongodb_list_queries/mongodb_list_queries.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No inputs: currentOp is queried directly through the handle.
    pass
def mongodb_list_queries_printer(output):
    """Print in-progress operations, or the error if the command failed."""
    if output is None:
        return
    print("\n\n")
    if isinstance(output, Exception):
        pprint.pprint(f"Error : {output}")
        return
    pprint.pprint(output['inprog'])
def mongodb_list_queries(handle) -> Dict:
    """mongodb_list_queries can returns information on all the operations running.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :rtype: Dict with the currentOp response, or {"Error": <exception>} on failure.
    """
    try:
        return handle.admin.command({"currentOp": True})
    except Exception as e:
        return {"Error": e}
================================================
FILE: Mongo/legos/mongodb_read_query/README.md
================================================
# MongoDB Read Query
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_read_query/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_read_query/mongodb_read_query.json
================================================
{
"action_title": "MongoDB Read Query",
"action_description": "MongoDB Read Query",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_read_query",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_QUERY"]
}
================================================
FILE: Mongo/legos/mongodb_read_query/mongodb_read_query.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the MongoDB read-query action (mongodb_read_query)."""
    # Target database to query.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database.'
    )
    # Target collection within the database.
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection.'
    )
    # Filter passed straight to collection.find().
    query: dict = Field(
        title='Read Query',
        description='Read only query in dictionary format. For eg: {"foo":"bar"}.'
    )
def mongodb_read_query_printer(output):
    """Pretty-print every document returned by the read query."""
    if output is None:
        return
    print("\n\n")
    for document in output:
        pprint.pprint(document)
def mongodb_read_query(handle, database_name: str, collection_name: str, query: dict) -> List:
    """mongodb_read_query Runs a read-only MongoDB find() and returns all matching documents.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :type collection_name: str
    :param collection_name: Name of the MongoDB collection.
    :type query: Dict
    :param query: Read only query in dictionary format.
    :rtype: List of all matching documents, or a list containing the
        exception on failure.
    """
    try:
        cursor = handle[database_name][collection_name].find(query)
        # PyMongo cursors are lazy: the server is only contacted while the
        # cursor is iterated, so the iteration must stay inside this try
        # block for server-side errors to be caught (the original code
        # iterated outside the try, letting those errors escape).
        return list(cursor)
    except Exception as e:
        # Preserve the lego convention of returning the exception in a list.
        return [e]
================================================
FILE: Mongo/legos/mongodb_remove_field_in_collections/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_remove_field_in_collections/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_remove_field_in_collections/mongodb_remove_field_in_collections.json
================================================
{
"action_title": "MongoDB remove a field in all collections",
"action_description": "MongoDB remove a field in all collections",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_remove_field_in_collections",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_COLLECTION"]
}
================================================
FILE: Mongo/legos/mongodb_remove_field_in_collections/mongodb_remove_field_in_collections.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the MongoDB remove-field action."""
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database.'
    )
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection.'
    )
    # Field names to strip from every document via a $unset update.
    remove_fields: dict = Field(
        title='Remove fields from every document',
        description='''
The Removal of field apply in dictionary format.
For eg: {"field":"value"}.
'''
    )
    # When True, update_many may insert a document if none match.
    upsert: bool = Field(
        True,
        title='Upsert',
        description='Allow creation of a new document, if one does not exist.'
    )
def mongodb_remove_field_in_collections_printer(output):
    """Show the collection contents (or the error) after field removal."""
    if output is None:
        return
    print("\n\n")
    if not isinstance(output, Exception):
        for document in output:
            pprint.pprint(document)
    else:
        pprint.pprint(f"Error : {output}")
def mongodb_remove_field_in_collections(
        handle,
        database_name: str,
        collection_name: str,
        remove_fields: dict,
        upsert: bool = True
    ) -> List:
    """mongodb_remove_field_in_collections Removes the given fields from every document in a collection.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :type collection_name: str
    :param collection_name: Name of the MongoDB collection.
    :type remove_fields: Dict
    :param remove_fields: Remove fields from every document.
    :type upsert: bool
    :param upsert: Allow creation of a new document, if one does not exist.
    :rtype: List of all documents in the collection after the removal, or a
        list containing the exception on failure.
    """
    # $unset drops the named fields; the match filter {} targets every document.
    unset_spec = {"$unset": remove_fields}
    try:
        collection = handle[database_name][collection_name]
        collection.update_many(
            {},
            update=unset_spec,
            upsert=upsert)
        # Return the full collection so the caller can inspect the result.
        return list(collection.find())
    except Exception as e:
        return [e]
================================================
FILE: Mongo/legos/mongodb_rename_database/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_rename_database/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_rename_database/mongodb_rename_database.json
================================================
{
"action_title": "MongoDB Rename Database",
"action_description": "MongoDB Rename Database",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_rename_database",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB"]
}
================================================
FILE: Mongo/legos/mongodb_rename_database/mongodb_rename_database.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import os
import pprint
from typing import List
import bson
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the MongoDB rename-database action."""
    # Existing database to rename (it is dumped, restored, then dropped).
    old_database_name: str = Field(
        title='Old Database Name',
        description='''
Name of the MongoDB database that user want to change.
Warning : This solution is not suitable for big or complex databases
'''
    )
    # Name the dumped data is restored under.
    new_database_name: str = Field(
        title='New Database Name',
        description='''
New name of the MongoDB database.
Warning : This solution is not suitable for big or complex databases
'''
    )
def mongodb_rename_database_printer(output):
    """Render the result of mongodb_rename_database to stdout."""
    if output is None:
        return
    print("\n\n")
    if not isinstance(output, Exception):
        pprint.pprint("List of databases after renaming")
        pprint.pprint(output)
    else:
        pprint.pprint(f"Error : {output}")
def mongodb_rename_database(handle, old_database_name: str, new_database_name: str) -> List:
    """mongodb_rename_database rename database in mongodb.

    Implemented as dump -> restore -> drop: every collection of the old
    database is serialized to .bson files on disk, read back into the new
    database, and the old database is dropped once the restore succeeds.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type old_database_name: str
    :param old_database_name: Name of the MongoDB database that user want to change.
    :type new_database_name: str
    :param new_database_name: New name of the MongoDB database.
    :rtype: List of database names after renaming, or a list containing the
        exception on failure.
    """
    def dump(collections, conn, db_name, path):
        """
        MongoDB Dump
        :param collections: Database collections name
        :param conn: MongoDB client connection
        :param db_name: Database name
        :param path: Directory where one .bson file per collection is written
        :return: True when every collection was written out
        """
        try:
            db = conn[db_name]
            for coll in collections:
                # One file per collection: <path>/<collection>.bson
                with open(os.path.join(path, f'{coll}.bson'), 'wb+') as f:
                    for doc in db[coll].find():
                        f.write(bson.BSON.encode(doc))
            return True
        except Exception as e:
            raise e
    def restore(path, conn, db_name):
        """
        MongoDB Restore
        :param path: Database dumped path
        :param conn: MongoDB client connection
        :param db_name: Database name
        :return: True when the restore loop completed
        """
        try:
            db = conn[db_name]
            for coll in os.listdir(path):
                if coll.endswith('.bson'):
                    # Collection name is the file name without its extension.
                    with open(os.path.join(path, coll), 'rb+') as f:
                        db[coll.split('.')[0]].insert_many(bson.decode_all(f.read()))
            return True
        except Exception as e:
            raise e
        finally:
            # NOTE(review): this removes EVERY .bson file under `path`,
            # including files this run did not create — confirm /tmp is not
            # shared with other producers of .bson files.
            for coll in os.listdir(path):
                if coll.endswith('.bson'):
                    os.remove(os.path.join(path, coll))
    # Input param validation.
    try:
        db = handle[old_database_name]
        collection_list = db.list_collection_names()
        # Hard-coded scratch directory for the intermediate dump files.
        path = "/tmp/"
        # Steps 1 : Take a dump of old db
        is_backup = dump(collection_list, handle, old_database_name, path)
        # Step 2 : Restore the same dump in new db
        is_restore = False
        if is_backup:
            is_restore = restore(path, handle, new_database_name)
        # Step 3 : Drop the old Db
        if is_restore:
            handle.drop_database(old_database_name)
        # Verification
        # NOTE(review): presumably an empty source database never shows up in
        # list_database_names(), so renaming one is reported as an error
        # here — confirm this is acceptable.
        dblist = handle.list_database_names()
        if new_database_name not in dblist:
            return [Exception("Error Occured !!!")]
        return dblist
    except Exception as e:
        return [e]
================================================
FILE: Mongo/legos/mongodb_update_document/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_update_document/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_update_document/mongodb_update_document.json
================================================
{
"action_title": "MongoDB Update Document",
"action_description": "MongoDB Update Document",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_update_document",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_INT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_COLLECTION","CATEGORY_TYPE_MONGODB_DOCUMENT"]
}
================================================
FILE: Mongo/legos/mongodb_update_document/mongodb_update_document.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel, Field
from unskript.enums.mongo_enums import UpdateCommands
from pymongo.errors import AutoReconnect, ServerSelectionTimeoutError
class InputSchema(BaseModel):
    """Inputs for the MongoDB update-document action."""
    # Database on the connected MongoDB server.
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database.'
    )
    # Collection holding the document(s) to update.
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection.'
    )
    # Which pymongo update call to use; defaults to a single-document update.
    command: UpdateCommands = Field(
        UpdateCommands.update_one,
        title='Command',
        description='''
Db command
for Eg. update_one, update_many
Supported commands : update_one and update_many
'''
    )
    # Selection criteria for the document(s) to update.
    filter: dict = Field(
        title='Filter',
        description='A query that matches the document to update. For eg: {"foo":"bar"}.'
    )
    # Update operator document applied to the matched document(s).
    new_values: dict = Field(
        title='Update new fields to every document',
        description='''
The addition of fields apply in dictionary format.
For eg: { "$set": { "field": "value" } }
'''
    )
    # When True, insert a new document if the filter matches nothing.
    upsert: bool = Field(
        True,
        title='Upsert',
        description='Allow creation of a new document, if one does not exist.'
    )
def mongodb_update_document_printer(output):
    """Summarize how many documents mongodb_update_document modified."""
    if output is None:
        return
    print("\n")
    if output > 1:
        print(f"Updated {output} Documents")
    elif output == 0:
        print("No Documents Updated")
    else:
        print("Updated Given Document")
def mongodb_update_document(
        handle,
        database_name: str,
        collection_name: str,
        filter: dict,
        new_values: dict,
        command: UpdateCommands = UpdateCommands.update_one,
        upsert: bool = True) -> int:
    """mongodb_update_document Updates (or upserts) documents in a collection.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :type collection_name: str
    :param collection_name: Name of the MongoDB collection.
    :type filter: Dict
    :param filter: A query that matches the document to update.
    :type new_values: Dict
    :param new_values: Update new fields to every document
    :type command: UpdateCommands
    :param command: Db command (update_one or update_many).
    :type upsert: bool
    :param upsert: Allow creation of a new document, if one does not exist.
    :rtype: int count of modified documents
    """
    # Fail fast if the client handle can no longer reach the server.
    try:
        handle.server_info()
    except (AutoReconnect, ServerSelectionTimeoutError) as e:
        print("[UNSKRIPT]: Reconnection / Server Selection Timeout Error: ", str(e))
        raise e
    except Exception as e:
        print("[UNSKRIPT]: Error Connecting: ", str(e))
        raise e
    try:
        collection = handle[database_name][collection_name]
        outcome = None
        if command == UpdateCommands.update_one:
            outcome = collection.update_one(filter, new_values, upsert=upsert)
        elif command == UpdateCommands.update_many:
            outcome = collection.update_many(filter, new_values, upsert=upsert)
        # NOTE: an unsupported command falls through and reports 0.
        return outcome.modified_count if outcome is not None else 0
    except Exception as e:
        raise e
================================================
FILE: Mongo/legos/mongodb_write_query/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Mongo/legos/mongodb_write_query/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Mongo/legos/mongodb_write_query/mongodb_write_query.json
================================================
{
"action_title": "MongoDB Upsert Query",
"action_description": "MongoDB Upsert Query",
"action_type": "LEGO_TYPE_MONGODB",
"action_entry_function": "mongodb_write_query",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MONGODB", "CATEGORY_TYPE_MONGODB_QUERY"]
}
================================================
FILE: Mongo/legos/mongodb_write_query/mongodb_write_query.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the MongoDB upsert (update_many) action."""
    database_name: str = Field(
        title='Database Name',
        description='Name of the MongoDB database.'
    )
    collection_name: str = Field(
        title='Collection Name',
        description='Name of the MongoDB collection.'
    )
    # Selection criteria forwarded as update_many's filter argument.
    match_query: dict = Field(
        title='Match Query',
        description=('The selection criteria for the update in '
                     'dictionary format. For eg: {"foo":"bar"}.')
    )
    # Update operator document, e.g. {"$set": {...}}.
    update: dict = Field(
        title='Update Document',
        description='''The modifications to apply in dictionary format.
For eg: { "$set": { "field": "value" } }.'''
    )
    # When True, insert a document if nothing matches.
    upsert: bool = Field(
        True,
        title='Upsert',
        description='Allow creation of a new document, if one does not exist.'
    )
def mongodb_write_query_printer(output):
    """Print the outcome of mongodb_write_query."""
    if output is None:
        return
    print("\n\n")
    if "error" in output:
        # Report the error and stop: an error result does not carry the
        # matched/modified counters, so falling through to the count line
        # (as the original code did) would raise KeyError.
        print(f'Error : {output["error"]}')
        return
    print(
        f'MatchedCount: {output["matched_count"]}, ModifiedCount: {output["modified_count"]}')
def mongodb_write_query(
        handle,
        database_name: str,
        collection_name: str,
        match_query: dict,
        update: dict,
        upsert: bool = True
    ) -> Dict:
    """mongodb_write_query Applies update_many against a collection and reports the counts.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type database_name: str
    :param database_name: Name of the MongoDB database.
    :type collection_name: str
    :param collection_name: Name of the MongoDB collection.
    :type match_query: Dict
    :param match_query: The selection criteria for the update in dictionary format.
    :type update: Dict
    :param update: The modifications to apply in dictionary format.
    :type upsert: bool
    :param upsert: Allow creation of a new document, if one does not exist.
    :rtype: Dict with "matched_count" and "modified_count" of the update.
    """
    try:
        outcome = handle[database_name][collection_name].update_many(
            filter=match_query,
            update=update,
            upsert=upsert)
    except Exception as e:
        raise e
    # Flatten pymongo's UpdateResult into a plain dict for the caller.
    return {
        "matched_count": outcome.matched_count,
        "modified_count": outcome.modified_count,
    }
================================================
FILE: MsSQL/README.md
================================================
# MsSQL Actions
* [Get MS-SQL Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/MsSQL/legos/mssql_get_handle/README.md): Get MS-SQL Handle
* [MS-SQL Read Query](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/MsSQL/legos/mssql_read_query/README.md): MS-SQL Read Query
* [MS-SQL Write Query](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/MsSQL/legos/mssql_write_query/README.md): MS-SQL Write Query
================================================
FILE: MsSQL/__init__.py
================================================
================================================
FILE: MsSQL/legos/__init__.py
================================================
================================================
FILE: MsSQL/legos/mssql_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: MsSQL/legos/mssql_get_handle/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: MsSQL/legos/mssql_get_handle/mssql_get_handle.json
================================================
{
"action_title": "Get MS-SQL Handle",
"action_description": "Get MS-SQL Handle",
"action_type": "LEGO_TYPE_MSSQL",
"action_entry_function": "mssql_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_supports_iteration": false
}
================================================
FILE: MsSQL/legos/mssql_get_handle/mssql_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    """This action takes no input parameters."""
    pass
def mssql_get_handle(handle):
    """mssql_get_handle returns the handle of MSSQL.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :rtype: handle of MSSQL.
    """
    # The handle produced by task.validate(...) is passed straight through.
    return handle
================================================
FILE: MsSQL/legos/mssql_read_query/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: MsSQL/legos/mssql_read_query/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: MsSQL/legos/mssql_read_query/mssql_read_query.json
================================================
{
"action_title": "MS-SQL Read Query",
"action_description": "MS-SQL Read Query",
"action_type": "LEGO_TYPE_MSSQL",
"action_entry_function": "mssql_read_query",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MSSQL", "CATEGORY_TYPE_MSSQL_QUERY"]
}
================================================
FILE: MsSQL/legos/mssql_read_query/mssql_read_query.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import Tuple, List
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    """Inputs for the MS-SQL read-query action."""
    # SQL SELECT text executed verbatim.
    query: str = Field(
        title='Read Query',
        description='Read query eg: select * from test;')
    # Optional positional query parameters.
    params: Tuple = Field(
        None,
        title='Parameters',
        description='Parameters to the query in list format. For eg: [1, 2, "abc"]')
def mssql_read_query_printer(output):
    """Print query results as a plain-text table."""
    if output is None:
        return
    print('\n')
    print(tabulate(output))
def mssql_read_query(handle, query: str, params: Tuple = ()) -> List:
    """mssql_read_query Runs mssql query with the provided parameters.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type query: str
    :param query: MSSQL read query.
    :type params: Tuple
    :param params: Parameters to the query in Tuple format.
    :rtype: List result of the query.
    """
    cursor = handle.cursor()
    # Only pass parameters through when some were actually supplied.
    if params:
        cursor.execute(query, params)
    else:
        cursor.execute(query)
    rows = cursor.fetchall()
    cursor.close()
    # NOTE(review): the connection itself is closed here, so this handle
    # cannot be reused for a second query — confirm intended.
    handle.close()
    return rows
================================================
FILE: MsSQL/legos/mssql_write_query/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: MsSQL/legos/mssql_write_query/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: MsSQL/legos/mssql_write_query/mssql_write_query.json
================================================
{
"action_title": "MS-SQL Write Query",
"action_description": "MS-SQL Write Query",
"action_type": "LEGO_TYPE_MSSQL",
"action_entry_function": "mssql_write_query",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MSSQL", "CATEGORY_TYPE_MSSQL_QUERY"]
}
================================================
FILE: MsSQL/legos/mssql_write_query/mssql_write_query.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List, Any
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the MS-SQL write-query action."""
    # SQL INSERT/UPDATE text executed verbatim.
    query: str = Field(
        title='Write Query',
        description='Query to insert/update')
    # Optional positional query parameters.
    params: List = Field(
        None,
        title='Parameters',
        description='Parameters to the query in list format. For eg: [1, 2, "abc"]')
def mssql_write_query(handle, query: str, params: List = None) -> None:
    """mssql_write_query Runs mssql query with the provided parameters.

    Fix: the previous default for ``params`` was ``List[Any]`` — a typing
    construct, not a value. It is truthy, so omitting params passed that
    object to cursor.execute(). Defaulting to None restores the intended
    "no parameters" path; callers that pass params are unaffected.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type query: str
    :param query: MSSQL insert/update query.
    :type params: List
    :param params: Optional parameters to the query in list format.
    :rtype: None if success. Exception on error.
    """
    cur = handle.cursor()
    if params:
        cur.execute(query, params)
    else:
        cur.execute(query)
    # Persist the write before tearing the connection down.
    handle.commit()
    cur.close()
    handle.close()
================================================
FILE: MySQL/README.md
================================================
# MySQL Actions
* [Get MySQL Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/MySQL/legos/mysql_get_handle/README.md): Get MySQL Handle
* [MySQL Get Long Running Queries](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/MySQL/legos/mysql_get_long_run_queries/README.md): MySQL Get Long Running Queries
* [MySQL Kill Query](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/MySQL/legos/mysql_kill_query/README.md): MySQL Kill Query
* [Run MySQL Query](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/MySQL/legos/mysql_read_query/README.md): Run MySQL Query
* [Create a MySQL Query](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/MySQL/legos/mysql_write_query/README.md): Create a MySQL Query
================================================
FILE: MySQL/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: MySQL/legos/__init__.py
================================================
================================================
FILE: MySQL/legos/mysql_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: MySQL/legos/mysql_get_handle/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: MySQL/legos/mysql_get_handle/mysql_get_handle.json
================================================
{
"action_title": "Get MySQL Handle",
"action_description": "Get MySQL Handle",
"action_type": "LEGO_TYPE_MYSQL",
"action_entry_function": "mysql_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_supports_iteration": false
}
================================================
FILE: MySQL/legos/mysql_get_handle/mysql_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    """This action takes no input parameters."""
    pass
def mysql_get_handle(handle):
    """mysql_get_handle returns the mysql connection handle.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :rtype: mysql Handle.
    """
    # The validated connection handle is passed straight through.
    return handle
================================================
FILE: MySQL/legos/mysql_get_long_run_queries/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: MySQL/legos/mysql_get_long_run_queries/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: MySQL/legos/mysql_get_long_run_queries/mysql_get_long_run_queries.json
================================================
{
"action_title": "MySQL Get Long Running Queries",
"action_description": "MySQL Get Long Running Queries",
"action_type": "LEGO_TYPE_MYSQL",
"action_entry_function": "mysql_get_long_run_queries",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MYSQL", "CATEGORY_TYPE_MYSQL_QUERY"]
}
================================================
FILE: MySQL/legos/mysql_get_long_run_queries/mysql_get_long_run_queries.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    """Inputs for the MySQL long-running-queries action."""
    # Minimum running time, in seconds, for a query to be reported.
    interval: int = Field(
        default=5,
        title='Interval(in seconds)',
        description='Return queries running longer than this interval')
def mysql_read_query_printer(output):
    """Print the long-running-query rows as a plain-text table."""
    # NOTE(review): the name does not follow the `<entry_function>_printer`
    # convention used by the sibling legos (it would be
    # mysql_get_long_run_queries_printer) — confirm the framework finds it.
    if output is None:
        return
    print('\n')
    print(tabulate(output))
def mysql_get_long_run_queries(handle, interval: int = 5) -> List:
    """mysql_get_long_run_queries returns all MySQL queries running longer than interval.

    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type interval: int
    :param interval: Integer value to filter queries which runs above interval time.
    :rtype: List of (PROCESSLIST_ID, PROCESSLIST_INFO) rows; on failure a
        dict {"Error": exception} (kept for backward compatibility).
    """
    # Get long running queries
    try:
        # Parameterized query (DB-API %s placeholder, used by the common
        # MySQL drivers) instead of f-string interpolation, so the interval
        # value can never alter the SQL text.
        query = ("SELECT PROCESSLIST_ID, PROCESSLIST_INFO FROM performance_schema.threads "
                 "WHERE PROCESSLIST_COMMAND = 'Query' AND PROCESSLIST_TIME >= %s;")
        cur = handle.cursor()
        cur.execute(query, (interval,))
        res = cur.fetchall()
        cur.close()
        handle.close()
        return res
    except Exception as e:
        return {"Error": e}
================================================
FILE: MySQL/legos/mysql_kill_query/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: MySQL/legos/mysql_kill_query/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: MySQL/legos/mysql_kill_query/mysql_kill_query.json
================================================
{
"action_title": "MySQL Kill Query",
"action_description": "MySQL Kill Query",
"action_type": "LEGO_TYPE_MYSQL",
"action_entry_function": "mysql_kill_query",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MYSQL", "CATEGORY_TYPE_MYSQL_QUERY"]
}
================================================
FILE: MySQL/legos/mysql_kill_query/mysql_kill_query.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the MySQL kill-query action."""
    # MySQL process/connection id passed to the KILL statement.
    processId: int = Field(
        title='An processId',
        description='Kill the process based on processId'
    )
def mysql_kill_query_printer(output):
    """Pretty-print the result of mysql_kill_query."""
    # Nothing to show when the action produced no output.
    if output is None:
        return
    print("\n\n")
    pprint.pprint(output)
def mysql_kill_query(handle, processId: int) -> str:
    """mysql_kill_query kills the MySQL process identified by processId.

    :type handle: object
    :param handle: Object returned by task.validate(...).
    :type processId: int
    :param processId: Process ID as integer that needs to be killed
    :rtype: Result of the kill %d process for the given processId in a str form.

    NOTE(review): despite the declared str return type, this returns the
    cursor's fetchall() result on success and a dict on error — confirm
    against callers before tightening.
    """
    # Kill long running queries using processId
    try:
        statement = f"kill {processId};"
        cursor = handle.cursor()
        cursor.execute(statement)
        rows = cursor.fetchall()
        cursor.close()
        handle.close()
        return rows
    except Exception as e:
        return {"Error": e}
================================================
FILE: MySQL/legos/mysql_read_query/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: MySQL/legos/mysql_read_query/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: MySQL/legos/mysql_read_query/mysql_read_query.json
================================================
{
"action_title": "Run MySQL Query",
"action_description": "Run MySQL Query",
"action_type": "LEGO_TYPE_MYSQL",
"action_entry_function": "mysql_read_query",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_MYSQL", "CATEGORY_TYPE_MYSQL_QUERY"]
}
================================================
FILE: MySQL/legos/mysql_read_query/mysql_read_query.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List, Any
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    """Inputs for the MySQL read-query action."""
    # SQL SELECT text executed verbatim.
    query: str = Field(
        title='Read Query',
        description='MySQL get query.')
    # Optional positional query parameters.
    params: List = Field(
        None,
        title='Parameters',
        description='Parameters to the query in list format. For eg: [1, 2, "abc"]')
def mysql_read_query_printer(output):
    """Pretty-print query results as a plain-text table."""
    if output is None:
        return
    print('\n')
    pprint.pprint(tabulate(output))
def mysql_read_query(handle, query: str, params: List = None) -> List:
    """mysql_read_query runs a MySQL read query with the provided parameters.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type query: str
    :param query: MySQL select/read query.

    :type params: List
    :param params: Optional parameters to the query in list format,
        eg: [1, 2, "abc"]. Defaults to None (no parameter substitution).

    :rtype: List of the result rows of the query.
    """
    # NOTE: the previous default was the typing construct ``List[Any]``
    # itself, which is not a valid parameter sequence; ``None`` tells the
    # driver to execute the query without substitution.
    cur = handle.cursor()
    try:
        cur.execute(query, params)
        res = cur.fetchall()
    finally:
        # Always release the cursor and connection, even if the query fails.
        cur.close()
        handle.close()
    return res
================================================
FILE: MySQL/legos/mysql_write_query/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Netbox/legos/netbox_list_devices/__init__.py
================================================
================================================
FILE: Netbox/legos/netbox_list_devices/netbox_list_devices.json
================================================
{
"action_title": "Netbox List Devices",
"action_description": "List all Netbox devices",
"action_type": "LEGO_TYPE_NETBOX",
"action_entry_function": "netbox_list_devices",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_NETBOX"]
}
================================================
FILE: Netbox/legos/netbox_list_devices/netbox_list_devices.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from pydantic import BaseModel
class InputSchema(BaseModel):
    """This action takes no inputs; the Netbox handle is supplied separately."""
    pass
def netbox_list_devices_printer(output):
    """Pretty-print the device list; does nothing when output is None."""
    if output is not None:
        pprint.pprint(output)
def netbox_list_devices(handle):
    """netbox_list_devices returns every device registered in Netbox.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: List of netbox devices.
    """
    devices = handle.dcim.devices.all()
    return devices
================================================
FILE: Nomad/README.md
================================================
# Nomad Actions
* [Nomad Get Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Nomad/legos/nomad_get_handle/README.md): Get Nomad Handle
* [Nomad List Jobs](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Nomad/legos/nomad_list_jobs/README.md): List all Nomad jobs
================================================
FILE: Nomad/__init__.py
================================================
================================================
FILE: Nomad/legos/__init__.py
================================================
================================================
FILE: Nomad/legos/nomad_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Opsgenie/legos/opsgenie_get_handle/__init__.py
================================================
================================================
FILE: Opsgenie/legos/opsgenie_get_handle/opsgenie_get_handle.json
================================================
{
"action_title": "Get Opsgenie Handle",
"action_description": "Get Opsgenie Handle",
"action_type": "LEGO_TYPE_OPSGENIE",
"action_entry_function": "opsgenie_get_handle",
"action_needs_credential": true,
"action_supports_iteration": false,
"action_supports_poll": false
}
================================================
FILE: Opsgenie/legos/opsgenie_get_handle/opsgenie_get_handle.py
================================================
##
## Copyright (c) 2023 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    """This action takes no inputs; the Opsgenie handle is supplied separately."""
    pass
def opsgenie_get_handle_printer(output):
    """Print the Opsgenie handle; no-op when output is None."""
    if output is not None:
        print(output)
def opsgenie_get_handle(handle):
    """opsgenie_get_handle returns the Opsgenie handle.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: Opsgenie handle.
    """
    # The docstring previously said "nomad handle" -- a copy/paste slip from
    # the Nomad lego; this action simply hands back the validated handle.
    return handle
================================================
FILE: Pingdom/README.md
================================================
# Pingdom Actions
* [Create new maintenance window.](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Pingdom/legos/pingdom_create_new_maintenance_window/README.md): Create new maintenance window.
* [Perform Pingdom single check](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Pingdom/legos/pingdom_do_single_check/README.md): Perform Pingdom Single Check
* [Get Pingdom Analysis Results for a specified Check](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Pingdom/legos/pingdom_get_analysis/README.md): Get Pingdom Analysis Results for a specified Check
* [Get list of checkIDs given a hostname](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Pingdom/legos/pingdom_get_checkids/README.md): Get list of checkIDs given a hostname. If no hostname provided, it lists all checkIDs.
* [Get list of checkIDs given a name](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Pingdom/legos/pingdom_get_checkids_by_name/README.md): Get list of checkIDs given a name. If name is not given, it gives all checkIDs. If transaction is set to true, it returns transaction checkIDs
* [Get Pingdom Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Pingdom/legos/pingdom_get_handle/README.md): Get Pingdom Handle
* [Pingdom Get Maintenance](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Pingdom/legos/pingdom_get_maintenance/README.md): Pingdom Get Maintenance
* [Get Pingdom Results](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Pingdom/legos/pingdom_get_results/README.md): Get Pingdom Results
* [Get Pingdom TMS Check](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Pingdom/legos/pingdom_get_tmscheck/README.md): Get Pingdom TMS Check
* [Pingdom lego to pause/unpause checkids](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Pingdom/legos/pingdom_pause_or_unpause_checkids/README.md): Pingdom lego to pause/unpause checkids
* [Perform Pingdom Traceroute](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Pingdom/legos/pingdom_traceroute/README.md): Perform Pingdom Traceroute
================================================
FILE: Pingdom/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
================================================
FILE: Pingdom/legos/__init__.py
================================================
================================================
FILE: Pingdom/legos/pingdom_create_new_maintenance_window/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Pingdom/legos/pingdom_create_new_maintenance_window/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Pingdom/legos/pingdom_create_new_maintenance_window/pingdom_create_new_maintenance_window.json
================================================
{
"action_title": "Create new maintenance window.",
"action_description": "Create new maintenance window.",
"action_type": "LEGO_TYPE_PINGDOM",
"action_entry_function": "pingdom_create_new_maintenance_window",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_INT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PINGDOM"]
}
================================================
FILE: Pingdom/legos/pingdom_create_new_maintenance_window/pingdom_create_new_maintenance_window.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, List
from datetime import datetime as dt, timedelta
from pydantic import BaseModel, Field
from unskript.thirdparty.pingdom import swagger_client as pingdom_client
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Inputs for creating a Pingdom maintenance window that starts now and
    lasts ``duration`` minutes."""
    description: str = Field(
        title='Description',
        description='Description for the maintenance window.')
    duration: int = Field(
        title='duration',
        description='Select a duration in minutes eg: 60.')
    tmsids: Optional[List[int]] = Field(
        default=None,
        title='Transaction checks Ids',
        description=('Transaction checks Ids to assign to the maintenance '
                     'window eg: [120824,1208233].')
    )
    uptimeids: Optional[List[int]] = Field(
        default=None,
        title='Uptime Ids',
        description=('Uptime checks Ids to assign to the maintenance window eg: '
                     '[11061762,11061787].')
    )
def pingdom_create_new_maintenance_window_printer(output):
    """Print a confirmation with the new maintenance window id and the
    current wall-clock time; no-op when output is None."""
    if output is None:
        return
    print("\n")
    message = (f'Successfully created maintenance window {output}',
               f'starting time {dt.now().strftime("%H:%M:%S")}')
    pp.pprint(message)
def pingdom_create_new_maintenance_window(handle,
                                          description: str,
                                          duration: int,
                                          tmsids=None,
                                          uptimeids=None) -> int:
    """pingdom_create_new_maintenance_window creates a maintenance window
    starting now and ending ``duration`` minutes later.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type description: str
    :param description: Description for the maintenance window.

    :type duration: int
    :param duration: Duration in minutes eg: 60.

    :type tmsids: list
    :param tmsids: Transaction checks Ids to assign to the window.

    :type uptimeids: list
    :param uptimeids: Uptime checks Ids to assign to the maintenance window.

    :rtype: int - id of the newly created maintenance window.
    """
    start_time = dt.now()
    end_time = start_time + timedelta(minutes=duration)
    body = {
        'description': description,
        # Epoch seconds as strings. datetime.timestamp() is portable, unlike
        # the glibc-specific strftime("%s") used previously.
        'from': str(int(start_time.timestamp())),
        'to': str(int(end_time.timestamp())),
        # Both keys were always sent (possibly as empty lists) before; keep
        # that behavior.
        'tmsids': tmsids if tmsids is not None else [],
        'uptimeids': uptimeids if uptimeids is not None else [],
    }
    maintenance = pingdom_client.MaintenanceApi(api_client=handle)
    result = maintenance.maintenance_post_with_http_info(_return_http_data_only=True, body=body)
    return result.maintenance.id
================================================
FILE: Pingdom/legos/pingdom_do_single_check/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Pingdom/legos/pingdom_do_single_check/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Pingdom/legos/pingdom_do_single_check/pingdom_do_single_check.json
================================================
{
"action_title": "Perform Pingdom single check",
"action_description": "Perform Pingdom Single Check",
"action_type": "LEGO_TYPE_PINGDOM",
"action_entry_function": "pingdom_do_single_check",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PINGDOM"]
}
================================================
FILE: Pingdom/legos/pingdom_do_single_check/pingdom_do_single_check.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Dict
from pydantic import BaseModel, Field
from unskript.thirdparty.pingdom import swagger_client as pingdom_client
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Inputs for a single on-demand Pingdom probe check against a host."""
    host: str = Field(
        title='Host',
        description='Target Host')
    type: Optional[str] = Field(
        'http',
        title="Type",
        description='Target host type. Possible values: http, smtp, pop3, imap')
def pingdom_do_single_check_printer(output):
    """Pretty-print the single-check result; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def pingdom_do_single_check(handle, host: str, type: str = 'http') -> Dict:
    """pingdom_do_single_check performs a single test using a
    specified Pingdom probe against a specified target.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type host: str
    :param host: Target Host.

    :type type: str
    :param type: Target host type. Possible values: http, smtp, pop3, imap.

    :rtype: Dict with the results of the single check.
    """
    # The previous version built a ``params`` dict that was never used; the
    # host/type values are passed directly to the API call.
    check = pingdom_client.SingleApi(api_client=handle)
    result = check.single_get(_return_http_data_only=True, host=host, type=type)
    return result
================================================
FILE: Pingdom/legos/pingdom_get_analysis/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Pingdom/legos/pingdom_get_analysis/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Pingdom/legos/pingdom_get_analysis/pingdom_get_analysis.json
================================================
{
"action_title": "Get Pingdom Analysis Results for a specified Check",
"action_description": "Get Pingdom Analysis Results for a specified Check",
"action_type": "LEGO_TYPE_PINGDOM",
"action_entry_function": "pingdom_get_analysis",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PINGDOM"]
}
================================================
FILE: Pingdom/legos/pingdom_get_analysis/pingdom_get_analysis.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Dict
from pydantic import BaseModel, Field
from unskript.thirdparty.pingdom import swagger_client as pingdom_client
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Inputs for fetching Pingdom root-cause-analysis results for a check.
    A value of 0 for the timestamp fields means "not specified"."""
    checkid: int = Field(
        title='Check ID',
        description='Pingdom Check ID')
    from_timestamp: Optional[int] = Field(
        0,
        title="Start Time",
        description='Timestamp in the UNIX Format date +%s')
    limit: Optional[int] = Field(
        100,
        title="Number of Results",
        description="Number of Results to Return")
    offset: Optional[int] = Field(
        0,
        title="Offset",
        description='Offset for Listing (requires limit to be specified)')
    to_timestamp: Optional[int] = Field(
        0,
        title="End Time",
        description='Timestamp in the UNIX Format date +%s')
def pingdom_get_analysis_printer(output):
    """Pretty-print the analysis results; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def pingdom_get_analysis(
        handle,
        checkid: int,
        from_timestamp: int = 0,
        limit: int = 100,
        offset: int = 0,
        to_timestamp: int = 0
        ) -> Dict:
    """pingdom_get_analysis returns the list of latest root cause analysis
    results for a specified check.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type checkid: int
    :param checkid: Pingdom Check ID.

    :type from_timestamp: int
    :param from_timestamp: Start Time Timestamp in the UNIX Format date.

    :type limit: int
    :param limit: Number of returned checks.

    :type offset: int
    :param offset: Offset of returned checks.

    :type to_timestamp: int
    :param to_timestamp: End Time Timestamp in the UNIX Format date.

    :rtype: Returns the list of latest RCA results for a given check.
    """
    # A timestamp of 0 means "not specified"; the bound is then omitted
    # (sent as None) in the API call.
    start = from_timestamp if from_timestamp != 0 else None
    end = to_timestamp if to_timestamp != 0 else None
    analysis_api = pingdom_client.AnalysisApi(api_client=handle)
    return analysis_api.analysis_checkid_get_with_http_info(
        _return_http_data_only=True,
        checkid=checkid,
        _from=start,
        to=end,
        limit=limit,
        offset=offset
    )
================================================
FILE: Pingdom/legos/pingdom_get_checkids/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Pingdom/legos/pingdom_get_checkids/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Pingdom/legos/pingdom_get_checkids/pingdom_get_checkids.json
================================================
{
"action_title": "Get list of checkIDs given a hostname",
"action_description": "Get list of checkIDs given a hostname. If no hostname provided, it lists all checkIDs.",
"action_type": "LEGO_TYPE_PINGDOM",
"action_entry_function": "pingdom_get_checkids",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PINGDOM"]
}
================================================
FILE: Pingdom/legos/pingdom_get_checkids/pingdom_get_checkids.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, List
from pydantic import BaseModel, Field
from unskript.thirdparty.pingdom import swagger_client as pingdom_client
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Inputs for listing Pingdom check ids, optionally filtered by hostname."""
    host_name: Optional[str] = Field(
        default=None,
        title='Hostname',
        description='Name of the target host.')
def pingdom_get_checkids_printer(output):
    """Pretty-print the list of check ids; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def pingdom_get_checkids(handle, host_name: str = "") -> List[int]:
    """pingdom_get_checkids lists Pingdom check ids, optionally filtered by
    target hostname.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type host_name: str
    :param host_name: Name of the target host. When empty, ids of all
        checks are returned.

    :rtype: list of checkids.
    """
    checks_api = pingdom_client.ChecksApi(api_client=handle)
    all_checks = checks_api.checks_get_with_http_info(_return_http_data_only=True).checks
    # A single comprehension replaces the duplicated filtered/unfiltered
    # branches and avoids shadowing the API-client variable with the loop
    # variable, as the previous version did.
    return [c.id for c in all_checks if not host_name or c.hostname == host_name]
================================================
FILE: Pingdom/legos/pingdom_get_checkids_by_name/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Pingdom/legos/pingdom_get_checkids_by_name/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Pingdom/legos/pingdom_get_checkids_by_name/pingdom_get_checkids_by_name.json
================================================
{
"action_title": "Get list of checkIDs given a name",
"action_description": "Get list of checkIDS given a name. If name is not given, it gives all checkIDs. If transaction is set to true, it returns transaction checkIDs",
"action_type": "LEGO_TYPE_PINGDOM",
"action_entry_function": "pingdom_get_checkids_by_name",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PINGDOM"]
}
================================================
FILE: Pingdom/legos/pingdom_get_checkids_by_name/pingdom_get_checkids_by_name.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, List
from pydantic import BaseModel, Field
from unskript.thirdparty.pingdom import swagger_client as pingdom_client
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Inputs for listing Pingdom check ids filtered by check name, for
    either uptime or transaction checks."""
    checkNames: Optional[List[str]] = Field(
        default=None,
        title='Check names',
        description='''Name of the checks, . Eg: ["Google", "app"]''')
    transaction: Optional[bool] = Field(
        default=False,
        title='Transaction',
        description='''Set to true if the checks are transaction checks. Default is false''')
def pingdom_get_checkids_by_name_printer(output):
    """Pretty-print the matching check ids; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def pingdom_get_checkids_by_name(handle, checkNames=None, transaction: bool = False) -> List[int]:
    """pingdom_get_checkids_by_name lists check ids whose names match.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type checkNames: List
    :param checkNames: Names of the checks, eg: ["Google", "app"]. When
        empty or None, all check ids are returned.

    :type transaction: bool
    :param transaction: Set to true if the checks are transaction checks.
        Default is false.

    :rtype: list of check ids.
    """
    # Only the listing API differs between transaction and uptime checks;
    # the name filtering (previously duplicated in both branches) is shared.
    if transaction:
        api = pingdom_client.TMSChecksApi(api_client=handle)
        checks = api.get_all_checks_with_http_info(_return_http_data_only=True).checks
    else:
        api = pingdom_client.ChecksApi(api_client=handle)
        checks = api.checks_get_with_http_info(_return_http_data_only=True).checks
    if checkNames:
        return [c.id for c in checks if c.name in checkNames]
    return [c.id for c in checks]
================================================
FILE: Pingdom/legos/pingdom_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Pingdom/legos/pingdom_get_handle/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Pingdom/legos/pingdom_get_handle/pingdom_get_handle.json
================================================
{
"action_title": "Get Pingdom Handle",
"action_description": "Get Pingdom Handle",
"action_type": "LEGO_TYPE_PINGDOM",
"action_entry_function": "pingdom_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false
}
================================================
FILE: Pingdom/legos/pingdom_get_handle/pingdom_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    """This action takes no inputs; the Pingdom handle is supplied separately."""
    pass
def pingdom_get_handle(handle):
    """pingdom_get_handle hands back the validated Pingdom handle unchanged.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: Pingdom Handle.
    """
    return handle
================================================
FILE: Pingdom/legos/pingdom_get_maintenance/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Pingdom/legos/pingdom_get_maintenance/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Pingdom/legos/pingdom_get_maintenance/pingdom_get_maintenance.json
================================================
{
"action_title": "Pingdom Get Maintenance",
"action_description": "Pingdom Get Maintenance",
"action_type": "LEGO_TYPE_PINGDOM",
"action_entry_function": "pingdom_get_maintenance",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PINGDOM"]
}
================================================
FILE: Pingdom/legos/pingdom_get_maintenance/pingdom_get_maintenance.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Dict
from pydantic import BaseModel, Field
from unskript.thirdparty.pingdom import swagger_client as pingdom_client
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Inputs for listing Pingdom maintenance windows."""
    # NOTE(review): limit/offset declare no default value, so pydantic treats
    # them as required despite the Optional annotation -- confirm intent.
    limit: Optional[int] = Field(
        title='Number of Results',
        description='Number of Results to return')
    offset: Optional[int] = Field(
        title="Offset",
        description='Offset of the list')
    order: Optional[str] = Field(
        'asc',
        title="Order",
        description=("Display ascending/descending order. Possible values: "
                     "asc, desc. NOTE: This needs to specify Order By field")
    )
    orderby: Optional[str] = Field(
        'description',
        title="Order By",
        description="Order by the specific property. Eg: description"
    )
def pingdom_get_maintenance_printer(output):
    """Pretty-print the maintenance windows; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def pingdom_get_maintenance(handle, limit: int = 0, offset: int = 0, order: str = 'asc',
                            orderby: str = 'description') -> Dict:
    """pingdom_get_maintenance returns a list of maintenance windows.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type limit: int
    :param limit: Number of returned maintenance windows.

    :type offset: int
    :param offset: Offset of returned maintenance windows.

    :type order: str
    :param order: Display ascending/descending order. Possible values: asc, desc.

    :type orderby: str
    :param orderby: Order by the specified property, eg: description.

    :rtype: Returns the list of maintenance windows.
    """
    maintenance_api = pingdom_client.MaintenanceApi(api_client=handle)
    # The previous ``x if x is not None else None`` guards on limit/offset
    # were no-ops and have been removed; the values are forwarded as-is.
    result = maintenance_api.maintenance_get_with_http_info(
        _return_http_data_only=True,
        order=order,
        orderby=orderby,
        limit=limit,
        offset=offset
    )
    return result
================================================
FILE: Pingdom/legos/pingdom_get_results/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Pingdom/legos/pingdom_get_results/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Pingdom/legos/pingdom_get_results/pingdom_get_results.json
================================================
{
"action_title": "Get Pingdom Results",
"action_description": "Get Pingdom Results",
"action_type": "LEGO_TYPE_PINGDOM",
"action_entry_function": "pingdom_get_results",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PINGDOM"]
}
================================================
FILE: Pingdom/legos/pingdom_get_results/pingdom_get_results.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Dict
from pydantic import BaseModel, Field
from unskript.thirdparty.pingdom import swagger_client as pingdom_client
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Inputs for fetching raw Pingdom test results for a check."""
    checkid: int = Field(
        title='Check ID',
        description='Pingdom Check ID')
    status: Optional[str] = Field(
        'down',
        title="Status",
        description=("Filter to only show specified results.Comma "
                     "seperated string. example: down,unconfirmed,unknown")
    )
    limit: Optional[int] = Field(
        10,
        title="Limit",
        description="Number of results to get")
def pingdom_get_results_printer(output):
    """Pretty-print the raw check results; no-op when output is None."""
    if output is not None:
        pprint.pprint(output)
def pingdom_get_results(handle, checkid: int, status: str = 'down', limit: int = 10) -> Dict:
    """pingdom_get_results returns a list of raw test results for a
    specified check.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type checkid: int
    :param checkid: Pingdom Check ID.

    :type status: str
    :param status: Filter to only show specified results; comma separated
        string, eg: down,unconfirmed,unknown.

    :type limit: int
    :param limit: Number of returned results.

    :rtype: Returns the raw results for a given checkID.
    """
    results_api = pingdom_client.ResultsApi(api_client=handle)
    return results_api.results_checkid_get_with_http_info(
        _return_http_data_only=True,
        checkid=checkid,
        status=status,
        limit=limit
    )
================================================
FILE: Pingdom/legos/pingdom_get_tmscheck/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Pingdom/legos/pingdom_get_tmscheck/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Pingdom/legos/pingdom_get_tmscheck/pingdom_get_tmscheck.json
================================================
{
"action_title": "Get Pingdom TMS Check",
"action_description": "Get Pingdom TMS Check",
"action_type": "LEGO_TYPE_PINGDOM",
"action_entry_function": "pingdom_get_tmscheck",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PINGDOM"]
}
================================================
FILE: Pingdom/legos/pingdom_get_tmscheck/pingdom_get_tmscheck.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Dict
from pydantic import BaseModel, Field
from unskript.thirdparty.pingdom import swagger_client as pingdom_client
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Inputs for listing Pingdom transaction (TMS) checks."""
    extended_tags: Optional[bool] = Field(
        False,
        title='Include Extended Tags',
        description='Include extended tags')
    # NOTE(review): limit/offset default to strings here while the entry
    # function declares them as ints with defaults 100/0 -- confirm intent.
    limit: Optional[str] = Field(
        '100',
        title="Number of Checks",
        description='Number of returned checks')
    offset: Optional[str] = Field(
        '0',
        title="Offset",
        description="Offset of returned checks")
    # NOTE(review): tags/type declare no default, so pydantic treats them as
    # required despite the Optional annotation -- confirm intent.
    tags: Optional[str] = Field(
        title="Tags",
        description='List of tags seperated by comma eg: nginx')
    type: Optional[str] = Field(
        title="Type",
        description='Filter Type: Possible values: script, recording')
def pingdom_get_tmscheck_printer(output):
    """Pretty-print the TMS check results; no-op when output is None.

    Named to match the ``<entry_function>_printer`` convention used by the
    sibling legos (the entry function is ``pingdom_get_tmscheck``).
    """
    if output is None:
        return
    pprint.pprint(output)


# Backward-compatible alias: the original function name contained a typo
# ("tmschecke"); keep it resolvable for any existing references.
pingdom_get_tmschecke_printer = pingdom_get_tmscheck_printer
def pingdom_get_tmscheck(
        handle,
        extended_tags: bool = False,
        limit: int = 100,
        offset: int = 0,
        tags: str = "",
        type: str = ""
        ) -> Dict:
    """pingdom_get_tmscheck returns the results of all transaction (TMS) checks.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type extended_tags: bool
    :param extended_tags: Include Extended Tags or Not.

    :type limit: int
    :param limit: Number of returned checks.

    :type offset: int
    :param offset: Offset of returned checks.

    :type tags: str
    :param tags: List of tags seperated by comma, eg: nginx.

    :type type: str
    :param type: Filter Type: Possible values: script, recording.

    :rtype: Returns the list of result of all transaction checks.
    """
    tms_api = pingdom_client.TMSChecksApi(api_client=handle)
    # The previous ``tags if tags is not None else None`` guard was a no-op
    # and has been removed; tags is forwarded as-is.
    result = tms_api.get_all_checks_with_http_info(
        _return_http_data_only=True,
        extended_tags=extended_tags,
        limit=limit,
        offset=offset,
        tags=tags,
        type=type
    )
    return result
================================================
FILE: Pingdom/legos/pingdom_pause_or_unpause_checkids/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Pingdom/legos/pingdom_pause_or_unpause_checkids/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Pingdom/legos/pingdom_pause_or_unpause_checkids/pingdom_pause_or_unpause_checkids.json
================================================
{
"action_title": "Pingdom lego to pause/unpause checkids",
"action_description": "Pingdom lego to pause/unpause checkids",
"action_type": "LEGO_TYPE_PINGDOM",
"action_entry_function": "pingdom_pause_or_unpause_checkids",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PINGDOM"]
}
================================================
FILE: Pingdom/legos/pingdom_pause_or_unpause_checkids/pingdom_pause_or_unpause_checkids.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List, Optional, Dict
from pydantic import BaseModel, Field
from unskript.thirdparty.pingdom import swagger_client as pingdom_client
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """UI input schema for the Pingdom pause/unpause check-ids action."""
    # Optional list of check ids; when omitted the action applies to all checks.
    checkIds: Optional[List[str]] = Field(
        title='checkIds',
        description='List of check Ids to be modified. eg: ["1643815305","1643815323"].')
    # True pauses the checks, False resumes them.
    pause: bool = Field(
        title="pause",
        description='True to pause the check Ids and false to unpause it.')
    # Check interval in minutes (Pingdom accepts a fixed set of values).
    resolution: int = Field(
        title="resolution",
        description='Interval time to test website (In Minutes). eg: 1 5 15 30 60.')
def pingdom_pause_or_unpause_checkids_printer(output):
    """Pretty-print the pause/unpause result; do nothing when output is None."""
    if output is not None:
        pprint.pprint(output)
def pingdom_pause_or_unpause_checkids(handle, pause: bool, resolution: int, checkIds=None) -> Dict:
    """Pause or unpause the given Pingdom check ids.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type pause: bool
    :param pause: True to pause the check Ids and false to unpause it.

    :type resolution: int
    :param resolution: Interval time to test website (In Minutes). eg: 1 5 15 30 60.

    :type checkIds: List
    :param checkIds: List of check Ids to be modified.

    :rtype: Result of the pause/unpause request.
    """
    payload = {"paused": pause, "resolution": resolution}
    # The "checkids" key is only sent when at least one id was supplied,
    # mirroring the original behavior for an empty or missing list.
    if checkIds:
        payload["checkids"] = ",".join(checkIds)
    api = pingdom_client.ChecksApi(api_client=handle)
    return api.checks_put_with_http_info(body=payload, _return_http_data_only=True)
================================================
FILE: Pingdom/legos/pingdom_traceroute/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Pingdom/legos/pingdom_traceroute/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Pingdom/legos/pingdom_traceroute/pingdom_traceroute.json
================================================
{
"action_title": "Perform Pingdom Traceroute",
"action_description": "Perform Pingdom Traceroute",
"action_type": "LEGO_TYPE_PINGDOM",
"action_entry_function": "pingdom_traceroute",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PINGDOM"]
}
================================================
FILE: Pingdom/legos/pingdom_traceroute/pingdom_traceroute.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import Optional, Dict
from pydantic import BaseModel, Field
from unskript.thirdparty.pingdom import swagger_client as pingdom_client
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """UI input schema for the Pingdom traceroute action."""
    # Hostname to trace, e.g. "google.com".
    host: str = Field(
        title='Host',
        description='Target Host eg: google.com')
    # Optional Pingdom probe to originate the traceroute from.
    probeid: Optional[int] = Field(
        title="Probe ID",
        description='Probe Identifier')
def pingdom_traceroute_printer(output):
    """Pretty-print the traceroute result; silently skip a None output."""
    if output is not None:
        pprint.pprint(output)
def pingdom_traceroute(handle, host: str, probeid: int = None) -> Dict:
    """pingdom_traceroute performs a traceroute to the given host and returns the result.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type host: str
    :param host: Target Host eg: google.com.

    :type probeid: int
    :param probeid: Optional probe identifier to run the traceroute from.

    :rtype: Traceroute result for the given host.
    """
    traceroute = pingdom_client.TracerouteApi(api_client=handle)
    result = traceroute.traceroute_get_with_http_info(
        _return_http_data_only=True,
        host=host,
        probeid=probeid
    )
    return result
================================================
FILE: Postgresql/Postgresql_Display_Long_Running.ipynb
================================================
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "133bee4c",
"metadata": {
"jupyter": {
"source_hidden": false
},
"name": "Runbook Overview",
"orderProperties": [],
"tags": [],
"title": "Runbook Overview"
},
"source": [
"1. Long Running PostgreSQL Queries
2. Post Slack Message\n",
"
Here we will use unSkript Long Running PostgreSQL Queries action. This action finds out all the long-running queries on the PostgreSQL database.
\n", "\n", "\n", "Input parameters:
\n", "interval
\n", "" ] }, { "cell_type": "code", "execution_count": 10, "id": "c8565b85-30c3-43f7-9f4b-b8a3bd271861", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionCategories": [], "actionIsCheck": false, "actionNeedsCredential": true, "actionNextHop": [], "actionNextHopParameterMapping": {}, "actionOutputType": "", "actionRequiredLinesInCode": [], "actionSupportsIteration": true, "actionSupportsPoll": true, "action_modified": false, "action_uuid": "ef9f0f3dd00ef0972895ea006375f1a4496dca1b7266bc60fdfbd8ab4feee6c3", "collapsed": true, "continueOnError": false, "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Long Running PostgreSQL Queries", "execution_data": { "last_date_success_run_cell": "2023-02-15T18:50:41.391Z" }, "id": 332, "index": 332, "inputData": [ { "interval": { "constant": false, "value": "int(interval)" } } ], "inputschema": [ { "properties": { "interval": { "default": 5, "description": "Return queries running longer than interval", "title": "Interval(in minutes)", "type": "integer" } }, "title": "postgresql_long_running_queries", "type": "object" } ], "jupyter": { "outputs_hidden": true, "source_hidden": true }, "legotype": "LEGO_TYPE_POSTGRESQL", "name": "Long Running PostgreSQL Queries", "nouns": [], "orderProperties": [ "interval" ], "output": { "type": "" }, "outputParams": { "output_name": "postgresql_queries", "output_name_enabled": true }, "printOutput": true, "probeEnabled": false, "tags": [ "postgresql_long_running_queries" ], "title": "Long Running PostgreSQL Queries", "trusted": true, "verbs": [] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "import pprint \n", "\n", "from typing import List, Any, Optional, Tuple\n", "from tabulate import tabulate\n", "from pydantic import BaseModel, Field\n", "\n", "\n", "def postgresql_long_running_queries_printer(output):\n", " if output is None:\n", " 
return\n", "\n", " pprint.pprint(output)\n", "\n", "\n", "def postgresql_long_running_queries(handle, interval: int = 5) -> Tuple:\n", " \"\"\"postgresql_long_running_queries Runs postgres query with the provided parameters.\n", "\n", " :type handle: object\n", " :param handle: Object returned from task.validate(...).\n", "\n", " :type interval: int\n", " :param interval: Interval (in seconds).\n", "\n", " :rtype: All the results of the query.\n", " \"\"\"\n", " # Input param validation.\n", "\n", " query = \"SELECT pid, user, pg_stat_activity.query_start, now() - \" \\\n", " \"pg_stat_activity.query_start AS query_time, query, state \" \\\n", " \" FROM pg_stat_activity WHERE state = 'active' AND (now() - \" \\\n", " \"pg_stat_activity.query_start) > interval '%d seconds';\" % interval\n", "\n", " cur = handle.cursor()\n", " cur.execute(query)\n", " output = []\n", " res = cur.fetchall()\n", " data = []\n", " for records in res:\n", " result = {\n", " \"pid\": records[0],\n", " \"user\": records[1],\n", " \"query_start\": records[2],\n", " \"query_time\": records[3],\n", " \"query\": records[4],\n", " \"state\": records[5]\n", " }\n", " output.append(result)\n", " data.append([records[0], records[4], records[5], records[3]])\n", "\n", " if len(res) > 0:\n", " headers = [\"pid\", \"query\", \"state\", \"duration\"]\n", " print(\"\\n\")\n", " output = tabulate(data, headers=headers, tablefmt=\"grid\")\n", "\n", " handle.commit()\n", " cur.close()\n", " handle.close()\n", " if len(output) != 0:\n", " return (False, output)\n", " else:\n", " return (True, None)\n", "\n", "task = Task(Workflow())\n", "\n", "task.configure(inputParamsJson='''{\n", " \"interval\": \"int(interval)\"\n", " }''')\n", "task.configure(outputName=\"postgresql_queries\")\n", "task.configure(printOutput=True)\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.execute(postgresql_long_running_queries, lego_printer=postgresql_long_running_queries_printer, hdl=hdl, 
args=args)" ], "output": {} }, { "attachments": {}, "cell_type": "markdown", "id": "5b8a6162-5475-422d-98c6-7d756956ed8f", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-1 Extension", "orderProperties": [], "tags": [], "title": "Step-1 Extension" }, "source": [ "Output variable:
\n", "postgresql_queries
In this action, we modify the output from step 1 and return a list of dictionary items for all the long-running queries on the PostgreSQL database.
\n", "\n", "" ] }, { "cell_type": "code", "execution_count": 12, "id": "e8b0d7b7-03a5-456c-971a-a638b2435eeb", "metadata": { "collapsed": true, "customAction": true, "execution_data": { "last_date_success_run_cell": "2023-02-15T18:58:06.161Z" }, "jupyter": { "outputs_hidden": true, "source_hidden": true }, "name": "Modify Output", "orderProperties": [], "tags": [], "title": "Modify Output", "trusted": true, "credentialsJson": {} }, "outputs": [], "source": [ "sql_queries = []\n", "if postgresql_queries[0] == False:\n", " for queries in postgresql_queries[1]:\n", " sql_queries.append(queries)" ], "output": {} }, { "cell_type": "markdown", "id": "1256bbdf", "metadata": { "jupyter": { "source_hidden": false }, "name": "Step-2", "orderProperties": [], "tags": [], "title": "Step-2" }, "source": [ "Output variable: postgresql_queries
\n", "
Here we will use unSkript Post Slack Message action. This action posts the message to the slack channel about the long-running queries on the PostgreSQL database.
\n", "\n", "\n", "Input parameters:
\n", "channel, message
\n", "" ] }, { "cell_type": "code", "execution_count": 7, "id": "84b2379b-c11c-42a8-8575-8b75efe52574", "metadata": { "accessType": "ACCESS_TYPE_UNSPECIFIED", "actionBashCommand": false, "actionNeedsCredential": true, "actionSupportsIteration": true, "actionSupportsPoll": true, "action_uuid": "6a87f83ab0ecfeecb9c98d084e2b1066c26fa64be5b4928d5573a5d60299802d", "createTime": "1970-01-01T00:00:00Z", "credentialsJson": {}, "currentVersion": "0.1.0", "description": "Post Slack Message", "id": 44, "index": 44, "inputData": [ { "channel": { "constant": false, "value": "channel" }, "message": { "constant": false, "value": "f\"Long Running Queries : {sql_queries}\"" } } ], "inputschema": [ { "properties": { "channel": { "description": "Name of the slack channel where the message to be posted", "title": "Channel", "type": "string" }, "message": { "description": "Message to be sent", "title": "Message", "type": "string" } }, "required": [ "channel", "message" ], "title": "slack_post_message", "type": "object" } ], "jupyter": { "source_hidden": true }, "legotype": "LEGO_TYPE_SLACK", "name": "Post Slack Message", "nouns": [ "slack", "message" ], "orderProperties": [ "channel", "message" ], "output": { "type": "" }, "outputParams": { "output_name": "message_status", "output_name_enabled": true }, "printOutput": true, "tags": [ "slack_post_message" ], "title": "Post Slack Message", "verbs": [ "post" ] }, "outputs": [], "source": [ "##\n", "# Copyright (c) 2021 unSkript, Inc\n", "# All rights reserved.\n", "##\n", "\n", "import pprint\n", "\n", "from pydantic import BaseModel, Field\n", "from slack_sdk import WebClient\n", "from slack_sdk.errors import SlackApiError\n", "\n", "pp = pprint.PrettyPrinter(indent=2)\n", "\n", "\n", "from beartype import beartype\n", "def legoPrinter(func):\n", " def Printer(*args, **kwargs):\n", " output = func(*args, **kwargs)\n", " if output:\n", " channel = kwargs[\"channel\"]\n", " pp.pprint(print(f\"Message sent to Slack channel {channel}\"))\n", 
" return output\n", " return Printer\n", "\n", "\n", "@legoPrinter\n", "@beartype\n", "def slack_post_message(\n", " handle: WebClient,\n", " channel: str,\n", " message: str) -> bool:\n", "\n", " try:\n", " response = handle.chat_postMessage(\n", " channel=channel,\n", " text=message)\n", " return True\n", " except SlackApiError as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.response['error']}\")\n", " return False\n", " except Exception as e:\n", " print(\"\\n\\n\")\n", " pp.pprint(\n", " f\"Failed sending message to slack channel {channel}, Error: {e.__str__()}\")\n", " return False\n", "\n", "\n", "task = Task(Workflow())\n", "task.configure(printOutput=True)\n", "task.configure(inputParamsJson='''{\n", " \"channel\": \"channel\",\n", " \"message\": \"f\\\\\"Long Running Queries : {sql_queries}\\\\\"\"\n", " }''')\n", "task.configure(outputName=\"message_status\")\n", "\n", "(err, hdl, args) = task.validate(vars=vars())\n", "if err is None:\n", " task.output = task.execute(slack_post_message, hdl=hdl, args=args)\n", " if task.output_name != None:\n", " globals().update({task.output_name: task.output[0]})" ], "output": {} }, { "cell_type": "markdown", "id": "f45b5e96", "metadata": { "jupyter": { "source_hidden": false }, "name": "Conclusion", "orderProperties": [], "tags": [], "title": "Conclusion" }, "source": [ "Output variable:
\n", "message_status
In this Runbook, we demonstrated the use of unSkript's PostgreSQL legos to run PostgreSQL Query and display and collect the long-running queries from a database and send the message to a slack channel. To view the full platform capabilities of unSkript please visit https://us.app.unskript.io
" ] } ], "metadata": { "execution_data": { "runbook_name": "Display long running queries in a PostgreSQL database", "parameters": [ "interval", "channel" ] }, "kernelspec": { "display_name": "unSkript (Build: 891)", "name": "python_kubernetes" }, "language_info": { "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "pygments_lexer": "ipython3" }, "parameterSchema": { "properties": { "interval": { "default": "5", "description": "Time interval (in seconds) to check for long queries", "title": "interval", "type": "number" }, "channel": { "description": "Slack channel to post to", "title": "channel", "type": "string" } }, "required": [], "title": "Schema", "type": "object" }, "parameterValues": null }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: Postgresql/Postgresql_Display_Long_Running.json ================================================ { "name": "Display long running queries in a PostgreSQL database", "description": "This runbook displays collects the long running queries from a database and sends a message to the specified slack channel. Poorly optimized queries and excessive connections can cause problems in PostgreSQL, impacting upstream services.", "uuid": "adcf88e8035c594e599fc9a33c28c9099187211f6daccb9d3ab4e5d17993086f", "icon": "CONNECTOR_TYPE_POSTGRESQL", "categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE" ], "connector_types": [ "CONNECTOR_TYPE_POSTGRESQL" ], "version": "1.0.0" } ================================================ FILE: Postgresql/README.md ================================================ # Postgresql RunBooks * [Display long running queries in a PostgreSQL database](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/Postgresql_Display_Long_Running.ipynb): This runbook displays collects the long running queries from a database and sends a message to the specified slack channel. 
Poorly optimized queries and excessive connections can cause problems in PostgreSQL, impacting upstream services. # Postgresql Actions * [PostgreSQL Calculate Bloat](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgres_calculate_bloat/README.md): This Lego calculates bloat for tables in Postgres * [Calling a PostgreSQL function](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_call_function/README.md): Calling a PostgreSQL function * [PostgreSQL Check Unused Indexes](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_check_unused_indexes/README.md): Find unused Indexes in a database in PostgreSQL * [Create Tables in PostgreSQL](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_create_table/README.md): Create Tables PostgreSQL * [Delete PostgreSQL Query](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_delete_query/README.md): Delete PostgreSQL Query * [PostgreSQL Get Cache Hit Ratio](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_get_cache_hit_ratio/README.md): The result of the action will show the total number of blocks read from disk, the total number of blocks found in the buffer cache, and the cache hit ratio as a percentage. For example, if the cache hit ratio is 99%, it means that 99% of all data requests were served from the buffer cache, and only 1% required reading data from disk. 
* [Get PostgreSQL Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_get_handle/README.md): Get PostgreSQL Handle * [PostgreSQL Get Index Usage](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_get_index_usage/README.md): The action result shows the data for table name, the percentage of times an index was used for that table, and the number of live rows in the table. * [PostgreSQL get service status](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_get_server_status/README.md): This action checks the status of each database. * [Execute commands in a PostgreSQL transaction.](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_handling_transaction/README.md): Given a set of PostgreSQL commands, this actions run them inside a transaction. * [Long Running PostgreSQL Queries](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_long_running_queries/README.md): Long Running PostgreSQL Queries * [Read PostgreSQL Query](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_read_query/README.md): Read PostgreSQL Query * [Show tables in PostgreSQL Database](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_show_tables/README.md): Show the tables existing in a PostgreSQL Database. 
We execute the following query to fetch this information SELECT * FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'; * [Call PostgreSQL Stored Procedure](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_stored_procedures/README.md): Call PostgreSQL Stored Procedure * [Write PostgreSQL Query](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Postgresql/legos/postgresql_write_query/README.md): Write PostgreSQL Query ================================================ FILE: Postgresql/__init__.py ================================================ # # Copyright (c) 2021 unSkript.com # All rights reserved. # # # ================================================ FILE: Postgresql/legos/__init__.py ================================================ ================================================ FILE: Postgresql/legos/postgres_calculate_bloat/README.md ================================================ [
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgres_calculate_bloat/__init__.py
================================================
================================================
FILE: Postgresql/legos/postgres_calculate_bloat/postgres_calculate_bloat.json
================================================
{
"action_title": "PostgreSQL Calculate Bloat",
"action_description": "This Lego calculates bloat for tables in Postgres",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgres_calculate_bloat",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_POSTGRESQL","CATEGORY_TYPE_TROUBLESHOOTING"]
}
================================================
FILE: Postgresql/legos/postgres_calculate_bloat/postgres_calculate_bloat.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from tabulate import tabulate
from pydantic import BaseModel
class InputSchema(BaseModel):
    """This action takes no inputs; the bloat query needs only the DB handle."""
    pass
def postgres_calculate_bloat_printer(output):
    """Render the bloat estimate as a grid of (table, % bloat, size MB) rows.

    :param output: Rows returned by postgres_calculate_bloat, or None.
        Each row is indexed positionally; columns 2, 5 and 6 are the table
        name, percentage bloat and bloat size in MB respectively.
    """
    if output is None:
        return
    # Only the three displayed columns are extracted; the original also built
    # per-row dicts that were never used, which has been removed.
    rows = [[record[2], record[5], record[6]] for record in output]
    if rows:
        headers = ["Table", "% Bloat", "Size(MB)"]
        pprint.pprint(tabulate(rows, headers=headers, tablefmt="grid"))
    else:
        # Preserve the original behavior of printing an empty list.
        pprint.pprint([])
def postgres_calculate_bloat(handle) -> List:
    """postgres_calculate_bloat returns percentage bloat and size bloat of tables in a database.

    Runs a CTE-based estimation query over pg_stats / pg_class / pg_stat_user_tables
    and returns one row per user table: database name, schema name, table name,
    can_estimate flag, estimated live rows, percentage bloat, bloat size in MB,
    and total table size in MB, ordered by percentage bloat descending.

    NOTE(review): the connection handle is closed before returning, so it cannot
    be reused by the caller after this action runs.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: List of tuples with the bloat estimate columns described above.
    """
    # Bloat-estimation query; kept as a single backslash-continued literal.
    query = "WITH constants AS ( SELECT current_setting('block_size')::numeric AS bs, "\
        "23 AS hdr, 8 AS ma), no_stats AS ( SELECT table_schema, table_name, n_live_tup:"\
        ":numeric as est_rows,pg_table_size(relid)::numeric as table_size FROM "\
        "information_schema.columns JOIN pg_stat_user_tables as psut "\
        "ON table_schema = psut.schemaname "\
        "AND table_name = psut.relname "\
        "LEFT OUTER JOIN pg_stats "\
        "ON table_schema = pg_stats.schemaname "\
        "AND table_name = pg_stats.tablename "\
        "AND column_name = attname "\
        "WHERE attname IS NULL "\
        "AND table_schema NOT IN ('pg_catalog', 'information_schema') "\
        "GROUP BY table_schema, table_name, relid, n_live_tup "\
        "), "\
        "null_headers AS ( "\
        "SELECT "\
        "hdr+1+(sum(case when null_frac <> 0 THEN 1 else 0 END)/8) as nullhdr, "\
        "SUM((1-null_frac)*avg_width) as datawidth, "\
        "MAX(null_frac) as maxfracsum, "\
        "schemaname, "\
        "tablename, "\
        "hdr, ma, bs "\
        "FROM pg_stats CROSS JOIN constants "\
        "LEFT OUTER JOIN no_stats "\
        "ON schemaname = no_stats.table_schema "\
        "AND tablename = no_stats.table_name "\
        "WHERE schemaname NOT IN ('pg_catalog', 'information_schema') "\
        "AND no_stats.table_name IS NULL "\
        "AND EXISTS ( SELECT 1 "\
        "FROM information_schema.columns "\
        "WHERE schemaname = columns.table_schema "\
        "AND tablename = columns.table_name ) "\
        "GROUP BY schemaname, tablename, hdr, ma, bs "\
        "), "\
        "data_headers AS ( "\
        "SELECT "\
        "ma, bs, hdr, schemaname, tablename, "\
        "(datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr, "\
        "(maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2 "\
        "FROM null_headers "\
        "), "\
        "table_estimates AS ( "\
        "SELECT schemaname, tablename, bs, "\
        "reltuples::numeric as est_rows, relpages * bs as table_bytes, "\
        "CEIL((reltuples* "\
        "(datahdr + nullhdr2 + 4 + ma - "\
        "(CASE WHEN datahdr%ma=0 "\
        "THEN ma ELSE datahdr%ma END) "\
        ")/(bs-20))) * bs AS expected_bytes, "\
        "reltoastrelid "\
        "FROM data_headers "\
        "JOIN pg_class ON tablename = relname "\
        "JOIN pg_namespace ON relnamespace = pg_namespace.oid "\
        "AND schemaname = nspname "\
        "WHERE pg_class.relkind = 'r' "\
        "), "\
        "estimates_with_toast AS ( "\
        "SELECT schemaname, tablename, "\
        "TRUE as can_estimate, "\
        "est_rows, "\
        "table_bytes + ( coalesce(toast.relpages, 0) * bs ) as table_bytes, "\
        "expected_bytes + ( ceil( coalesce(toast.reltuples, 0) / 4 ) * bs ) as expected_bytes "\
        "FROM table_estimates LEFT OUTER JOIN pg_class as toast "\
        "ON table_estimates.reltoastrelid = toast.oid "\
        "AND toast.relkind = 't' "\
        "), "\
        "table_estimates_plus AS ( "\
        "SELECT current_database() as databasename, "\
        "schemaname, tablename, can_estimate, "\
        "est_rows, "\
        "CASE WHEN table_bytes > 0 "\
        "THEN table_bytes::NUMERIC "\
        "ELSE NULL::NUMERIC END "\
        "AS table_bytes, "\
        "CASE WHEN expected_bytes > 0 "\
        "THEN expected_bytes::NUMERIC "\
        "ELSE NULL::NUMERIC END "\
        "AS expected_bytes, "\
        "CASE WHEN expected_bytes > 0 AND table_bytes > 0 "\
        "AND expected_bytes <= table_bytes "\
        "THEN (table_bytes - expected_bytes)::NUMERIC "\
        "ELSE 0::NUMERIC END AS bloat_bytes "\
        "FROM estimates_with_toast "\
        "UNION ALL "\
        "SELECT current_database() as databasename, "\
        "table_schema, table_name, FALSE, "\
        "est_rows, table_size, "\
        "NULL::NUMERIC, NULL::NUMERIC "\
        "FROM no_stats "\
        "), "\
        "bloat_data AS ( "\
        "select current_database() as databasename, "\
        "schemaname, tablename, can_estimate, "\
        "table_bytes, round(table_bytes/(1024^2)::NUMERIC,3) as table_mb, "\
        "expected_bytes, round(expected_bytes/(1024^2)::NUMERIC,3) as expected_mb, "\
        "round(bloat_bytes*100/table_bytes) as pct_bloat, "\
        "round(bloat_bytes/(1024::NUMERIC^2),2) as mb_bloat, "\
        "table_bytes, expected_bytes, est_rows "\
        "FROM table_estimates_plus "\
        ") "\
        "SELECT databasename, schemaname, tablename, "\
        "can_estimate, "\
        "est_rows, "\
        "pct_bloat, mb_bloat, "\
        "table_mb "\
        "FROM bloat_data "\
        "ORDER BY pct_bloat DESC; "
    # Run the estimate and hand back the raw rows; the printer formats them.
    cur = handle.cursor()
    cur.execute(query)
    result = cur.fetchall()
    handle.commit()
    cur.close()
    # The connection itself is closed here as well (see NOTE above).
    handle.close()
    return result
================================================
FILE: Postgresql/legos/postgresql_call_function/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_call_function/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Postgresql/legos/postgresql_call_function/postgresql_call_function.json
================================================
{
"action_title": "Calling a PostgreSQL function",
"action_description": "Calling a PostgreSQL function",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_call_function",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_POSTGRESQL","CATEGORY_TYPE_POSTGRESQL_QUERY"]
}
================================================
FILE: Postgresql/legos/postgresql_call_function/postgresql_call_function.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from typing import List, Any
from pydantic import BaseModel, Field
import psycopg2
from tabulate import tabulate
class InputSchema(BaseModel):
    """UI input schema for the "Calling a PostgreSQL function" action."""
    # Name of the stored function to invoke.
    function_name: str = Field(
        title='Function Name',
        description='Calling a PostgreSQL function')
    # Optional positional arguments for the function (defaults to None).
    params: list = Field(
        None,
        title='Parameters',
        description='Parameters to the function in list format. For eg: [1, 2]')
def postgresql_call_function_printer(output):
    """Print the function result as a grid table and return it unchanged.

    :param output: List of result rows, or None; None is passed through
        without printing (previously this raised TypeError on len(None)).
    """
    if output is None:
        return None
    print("\n")
    if len(output) > 0:
        print("\n")
        print(tabulate(output, tablefmt="grid"))
    return output
def postgresql_call_function(handle, function_name: str, params: List = None) -> List:
    """postgresql_call_function runs a PostgreSQL function with the provided parameters.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type function_name: str
    :param function_name: Function Name.

    :type params: List
    :param params: Parameters to the function in list format, e.g. [1, 2].

    :rtype: List of result rows; each row is a list of column values.
    """
    # Previously the default was the typing object List[Any], which is not a
    # valid parameter sequence for callproc; default to an empty list instead.
    if params is None:
        params = []
    data = []
    try:
        cur = handle.cursor()
        cur.callproc(function_name, params)
        # Process the result set, materializing each row as a list of values
        # (the original appended generator objects instead of the values).
        res = cur.fetchall()
        for records in res:
            data.append(list(records))
        # Close communication with the PostgreSQL database
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(f"Error : {error}")
    finally:
        if handle:
            handle.close()
    return data
================================================
FILE: Postgresql/legos/postgresql_check_active_connections/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_check_active_connections/__init__.py
================================================
================================================
FILE: Postgresql/legos/postgresql_check_active_connections/postgresql_check_active_connections.json
================================================
{
"action_title": "PostgreSQL check active connections",
"action_description": "Checks if the percentage of active connections to the database exceeds the provided threshold.",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_check_active_connections",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Postgresql/legos/postgresql_check_active_connections/postgresql_check_active_connections.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    """UI input schema for the active-connections check.

    NOTE(review): this field is named threshold_connections (default 100) while
    the entry function takes threshold_percentage (default 85) — confirm the
    parameter mapping between schema and function.
    """
    threshold_connections: Optional[int] = Field(
        100,
        description='Number of connections to consider as the threshold.',
        title='Threshold no. of connections',
    )
def postgresql_check_active_connections_printer(output):
    """Render the check result: a grid of offending counts, or an all-clear message.

    :param output: (status, data) tuple from postgresql_check_active_connections.
    """
    status, data = output
    if not status and data:
        # "threshold" holds an absolute connection count (derived from the
        # percentage), so the header must not be labelled as a percentage.
        headers = ["Active Connections", "Threshold (connections)"]
        table_data = [[record["active_connections"], record["threshold"]] for record in data]
        print(tabulate(table_data, headers=headers, tablefmt="grid"))
    else:
        print("Active connections are below the threshold.")
def postgresql_check_active_connections(handle, threshold_percentage: int = 85) -> Tuple:
    """
    postgresql_check_active_connections checks if the number of active
    connections exceeds the given percentage of the server's max_connections.

    :type handle: object
    :param handle: Object returned from task.validate(...). Expected to expose
        cursor()/commit()/close() (psycopg2-style connection).

    :type threshold_percentage: int
    :param threshold_percentage: Optional, percentage of max_connections to
        consider as the threshold (default 85).

    :rtype: Tuple - (False, [details]) when the threshold is exceeded,
        (True, None) otherwise (also when the query fails).
    """
    # Count of connections currently executing a query.
    query_active_connections = "SELECT COUNT(*) FROM pg_stat_activity WHERE state = 'active';"
    # Configured connection limit of the server.
    query_pool_count = "SELECT setting::int FROM pg_settings WHERE name='max_connections';"
    result = []
    try:
        cur = handle.cursor()
        try:
            cur.execute(query_pool_count)
            total_pool_count = cur.fetchone()[0]
            # Absolute connection count corresponding to the percentage threshold.
            threshold = int((total_pool_count * threshold_percentage) / 100)
            cur.execute(query_active_connections)
            active_connections = cur.fetchone()[0]
            handle.commit()
            if active_connections > threshold:
                result.append({
                    "active_connections": active_connections,
                    "threshold": threshold,
                })
        finally:
            # Fix: release cursor and connection even when a query raises
            # (the original leaked both on the error path).
            cur.close()
            handle.close()
    except Exception as e:
        # Preserve original best-effort behavior: report the error and fall
        # through to a "check passed" result rather than raising.
        print("Error occurred:", e)
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: Postgresql/legos/postgresql_check_locks/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_check_locks/__init__.py
================================================
================================================
FILE: Postgresql/legos/postgresql_check_locks/postgresql_check_locks.json
================================================
{
"action_title": "PostgreSQL check for locks in database",
"action_description": "Checks for any locks in the postgres database.",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_check_locks",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Postgresql/legos/postgresql_check_locks/postgresql_check_locks.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from typing import Tuple
from pydantic import BaseModel
from tabulate import tabulate
class InputSchema(BaseModel):
    # This action takes no user inputs.
    pass
def postgresql_check_locks_printer(output):
    """Render ungranted locks as a grid table, or a no-locks message when the
    check passed."""
    status, data = output
    if status or not data:
        print("No ungranted locks found.")
        return
    rows = [[item["pid"], item["relation"], item["lock_mode"], item["granted"]]
            for item in data]
    print(tabulate(rows, headers=["PID", "Relation", "Lock Mode", "Granted"], tablefmt="grid"))
def postgresql_check_locks(handle) -> Tuple:
    """
    postgresql_check_locks identifies and returns locks that are currently
    requested but not granted in the PostgreSQL database.

    :type handle: object
    :param handle: Object returned from task.validate(...) (psycopg2-style
        connection exposing cursor()/commit()/close()).

    :rtype: Tuple - (False, [lock records]) when ungranted locks exist,
        (True, None) otherwise (also when the query fails).
    """
    # Only locks that are still waiting (granted IS FALSE) are of interest.
    query = """
    SELECT
        pid,
        relation::regclass,
        mode,
        granted
    FROM
        pg_locks
    WHERE
        granted IS FALSE;
    """
    result = []
    try:
        cur = handle.cursor()
        try:
            cur.execute(query)
            res = cur.fetchall()
            handle.commit()
        finally:
            # Fix: release cursor and connection even when the query raises
            # (the original leaked both on the error path).
            cur.close()
            handle.close()
        for record in res:
            result.append({
                "pid": record[0],
                "relation": record[1],
                "lock_mode": record[2],
                "granted": record[3],
            })
    except Exception as e:
        # Preserve original best-effort behavior: report and treat as passed.
        print("Error occurred:", e)
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: Postgresql/legos/postgresql_check_unused_indexes/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_check_unused_indexes/__init__.py
================================================
================================================
FILE: Postgresql/legos/postgresql_check_unused_indexes/postgresql_check_unused_indexes.json
================================================
{
"action_title": "PostgreSQL Check Unused Indexes",
"action_description": "Find unused Indexes in a database in PostgreSQL",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_check_unused_indexes",
"action_needs_credential": true,
"action_is_check": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_POSTGRESQL"],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Postgresql/legos/postgresql_check_unused_indexes/postgresql_check_unused_indexes.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, Tuple
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Indexes with fewer scans than this are considered unused.
    index_scans: Optional[int] = Field(
        default=10,
        title='Index Scans',
        description='Number of index scans initiated on this index')
    # NOTE(review): this value is compared against pg_relation_size(), which
    # returns bytes, so 5242880 is 5 MB — not "5GB" as the original comment
    # said, and the "kB" in the description looks inaccurate. Confirm the
    # intended unit before changing the default.
    index_size: Optional[int] = Field(
        default=5242880,
        title='Index Size',
        description='On-disk size in kB (kilobytes) of the table.')
def postgresql_check_unused_indexes_printer(output):
    """Pretty-print the result of postgresql_check_unused_indexes.

    Prints nothing when output is None.
    """
    if output is not None:
        pprint.pprint(output)
def postgresql_check_unused_indexes(handle, index_scans: int = 10, index_size: int = 5242880) -> Tuple:
    """postgresql_check_unused_indexes returns unused indexes in a database.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type index_scans: int
    :param index_scans: Optional, number of index scans initiated on this index.

    :type index_size: int
    :param index_size: Optional, on-disk size threshold of the table.
        NOTE(review): compared against pg_relation_size(), which returns bytes.

    :rtype: Tuple - (False, [index records]) when candidate unused indexes are
        found, (True, None) otherwise.
    """
    # int() casts guard against SQL injection before the values are inlined.
    size = int(index_size)
    scans = int(index_scans)
    # pg_relation_size(oid) returns the on-disk size in bytes of the main data
    # fork of a relation. Non-unique indexes with few scans on sufficiently
    # large tables are reported, ordered by size-per-scan (NULLS FIRST surfaces
    # never-scanned indexes first).
    query = "SELECT schemaname || '.' || relname AS table,indexrelname AS index," \
        "pg_size_pretty(pg_relation_size(i.indexrelid)) AS index_size,idx_scan as index_scans " \
        " FROM pg_stat_user_indexes ui JOIN pg_index i ON ui.indexrelid = i.indexrelid "\
        " WHERE NOT indisunique AND idx_scan < " + str(scans) + " AND pg_relation_size(relid) > "+ \
        str(size)+\
        " ORDER BY pg_relation_size(i.indexrelid) / nullif(idx_scan, 0) DESC NULLS FIRST,"\
        "pg_relation_size(i.indexrelid) DESC "
    result = []
    cur = handle.cursor()
    try:
        cur.execute(query)
        res = cur.fetchall()
        handle.commit()
    finally:
        # Fix: release cursor and connection even when the query raises
        # (the original had no cleanup on the error path).
        cur.close()
        handle.close()
    for records in res:
        result.append({
            "table_name": records[0],
            "index_name": records[1],
            "index_size": records[2],
            "index_scans": records[3],
        })
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: Postgresql/legos/postgresql_create_table/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_create_table/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Postgresql/legos/postgresql_create_table/postgresql_create_table.json
================================================
{
"action_title": "Create Tables in PostgreSQL",
"action_description": "Create Tables PostgreSQL",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_create_table",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_POSTGRESQL","CATEGORY_TYPE_POSTGRESQL_TABLE"]
}
================================================
FILE: Postgresql/legos/postgresql_create_table/postgresql_create_table.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from typing import Dict
from pydantic import BaseModel, Field
import psycopg2
class InputSchema(BaseModel):
    # List of CREATE TABLE statements, executed one per entry.
    commands: list = Field(
        title='Commands to create tables',
        description='''
        Postgres create table.
        For eg. ["CREATE TABLE test (_id SERIAL PRIMARY KEY, _name VARCHAR(255) NOT NULL)",
        "CREATE TABLE foo (_id SERIAL PRIMARY KEY)"]
        ''')
def postgresql_create_table_printer(output):
    """Echo the result dict of postgresql_create_table and pass it through."""
    print("\n")
    print(output)
    return output
def postgresql_create_table(handle, commands: tuple) -> Dict:
    """postgresql_create_table creates tables by running the given DDL statements.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type commands: tuple
    :param commands: CREATE TABLE statements, executed one by one.

    :rtype: Dict with a single 'result' key: a success message, or the error
        text when any statement fails.
    """
    output = {}
    try:
        cur = handle.cursor()
        # Create the tables one by one.
        for command in tuple(commands):
            cur.execute(command)
        # Close communication with the PostgreSQL database server.
        cur.close()
        # Commit the changes.
        handle.commit()
        # Fix: corrected "Sucessfully" typo in the user-facing message.
        output['result'] = 'Tables Created Successfully'
    except (Exception, psycopg2.DatabaseError) as error:
        # Fix: store the error text rather than the exception object so the
        # DICT output stays serializable and consistent with the success path.
        output["result"] = str(error)
    finally:
        if handle:
            handle.close()
    return output
================================================
FILE: Postgresql/legos/postgresql_delete_query/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_delete_query/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Postgresql/legos/postgresql_delete_query/postgresql_delete_query.json
================================================
{
"action_title": "Delete PostgreSQL Query",
"action_description": "Delete PostgreSQL Query",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_delete_query",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_POSTGRESQL","CATEGORY_TYPE_POSTGRESQL_QUERY"]
}
================================================
FILE: Postgresql/legos/postgresql_delete_query/postgresql_delete_query.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
import psycopg2
from pydantic import BaseModel, Field
# Shared pretty-printer used by this action's console output.
pp = pprint.PrettyPrinter(indent=2)
class InputSchema(BaseModel):
    # DELETE statement executed verbatim against the database.
    query: str = Field(
        title='Delete Query',
        description='Postgres delete query.')
def postgresql_delete_query(handle, query:str):
    """postgresql_delete_query runs a DELETE statement against PostgreSQL.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type query: str
    :param query: PostgreSQL DELETE query.

    :rtype: None; prints the number of deleted rows, or the error message.
    """
    try:
        cur = handle.cursor()
        cur.execute(query)
        # rowcount reflects how many rows the DELETE removed.
        rows_deleted = cur.rowcount
        # Persist the change, then release the cursor.
        handle.commit()
        cur.close()
        print("\n")
        pp.pprint("Deleted Record successfully")
        pp.pprint(f"The number of deleted rows: {rows_deleted}")
    except (Exception, psycopg2.DatabaseError) as error:
        pp.pprint(f"Error : {error}")
    finally:
        if handle:
            handle.close()
            pp.pprint("PostgreSQL connection is closed")
================================================
FILE: Postgresql/legos/postgresql_get_cache_hit_ratio/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_get_cache_hit_ratio/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Postgresql/legos/postgresql_get_cache_hit_ratio/postgresql_get_cache_hit_ratio.json
================================================
{
"action_title": "PostgreSQL Get Cache Hit Ratio",
"action_description": "The result of the action will show the total number of blocks read from disk, the total number of blocks found in the buffer cache, and the cache hit ratio as a percentage. For example, if the cache hit ratio is 99%, it means that 99% of all data requests were served from the buffer cache, and only 1% required reading data from disk.",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_get_cache_hit_ratio",
"action_needs_credential": true,
"action_is_check": false,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_POSTGRESQL","CATEGORY_TYPE_TROUBLESHOOTING"],
"action_next_hop": [],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Postgresql/legos/postgresql_get_cache_hit_ratio/postgresql_get_cache_hit_ratio.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Tuple
from pydantic import BaseModel
class InputSchema(BaseModel):
    # This action takes no user inputs.
    pass
def postgresql_get_cache_hit_ratio_printer(output):
    """Print the cache hit ratio from postgresql_get_cache_hit_ratio output."""
    if output is None or output[1] is None:
        print("No cache hit ratio data available.")
        return
    rows = output[1]
    if rows:
        # Third column of the first row is the ratio (0..1); show as percent.
        cache_hit_ratio = rows[0][2] * 100
        print(f"Cache hit ratio: {cache_hit_ratio:.2f}%")
    else:
        print("No cache hit ratio data available.")
    pprint.pprint(output)
def postgresql_get_cache_hit_ratio(handle) -> Tuple:
    """postgresql_get_cache_hit_ratio runs a query to compute the buffer-cache
    hit ratio across user tables.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: Tuple - (True, rows) when the hit ratio is >= 99%,
        (False, rows) when lower, (False, None) when no data is available.
    """
    # heap_blks_read = blocks read from disk, heap_blks_hit = blocks served
    # from the buffer cache; the ratio is NULL when both sums are zero.
    query = """SELECT sum(heap_blks_read) as heap_read, sum(heap_blks_hit) as heap_hit,
    sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)) as ratio FROM
    pg_statio_user_tables;"""
    cur = handle.cursor()
    try:
        cur.execute(query)
        res = cur.fetchall()
        handle.commit()
    finally:
        # Fix: release cursor and connection even when the query raises
        # (the original had no cleanup on the error path).
        cur.close()
        handle.close()
    if res and res[0][2] is not None:
        cache_hit_ratio = res[0][2] * 100
        # 99% is treated as a healthy cache hit ratio.
        if cache_hit_ratio >= 99:
            return (True, res)
        return (False, res)
    return (False, None)
================================================
FILE: Postgresql/legos/postgresql_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_get_handle/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Postgresql/legos/postgresql_get_handle/postgresql_get_handle.json
================================================
{
"action_title": "Get PostgreSQL Handle",
"action_description": "Get PostgreSQL Handle",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false
}
================================================
FILE: Postgresql/legos/postgresql_get_handle/postgresql_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    # This action takes no user inputs.
    pass
def postgresql_get_handle(handle):
    """postgresql_get_handle returns the postgresql connection handle.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :rtype: postgresql Handle.
    """
    # Pass-through: the validated connection itself is the handle.
    return handle
================================================
FILE: Postgresql/legos/postgresql_get_index_usage/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_get_index_usage/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Postgresql/legos/postgresql_get_index_usage/postgresql_get_index_usage.json
================================================
{
"action_title": "PostgreSQL Get Index Usage",
"action_description": "The action result shows the data for table name, the percentage of times an index was used for that table, and the number of live rows in the table.",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_get_index_usage",
"action_needs_credential": true,
"action_is_check": false,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_POSTGRESQL","CATEGORY_TYPE_TROUBLESHOOTING"]
}
================================================
FILE: Postgresql/legos/postgresql_get_index_usage/postgresql_get_index_usage.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from typing import List
from tabulate import tabulate
from pydantic import BaseModel
class InputSchema(BaseModel):
    # This action takes no user inputs.
    pass
def postgresql_get_index_usage_printer(output):
    """Print index-usage rows as a grid table.

    Fix: the original appended a generator object per row
    (`data.append(record for record in records)`); materialize each row as a
    list so tabulate always receives concrete row sequences.
    """
    data = [list(records) for records in output]
    headers = ['Table Name', 'Index Usage Percentage', 'Number of Rows']
    print(tabulate(data, headers=headers, tablefmt="grid"))
def postgresql_get_index_usage(handle) -> List:
    """postgresql_get_index_usage reports, per user table, the percentage of
    scans that used an index, plus the live row count.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: List of (relname, percent_of_times_index_used, rows_in_table) rows.
    """
    # Tables with zero total scans are excluded to avoid division by zero.
    query = """
    SELECT
        relname,
        100 * idx_scan / (seq_scan + idx_scan) percent_of_times_index_used,
        n_live_tup rows_in_table
    FROM
        pg_stat_user_tables
    WHERE
        seq_scan + idx_scan > 0
    ORDER BY
        n_live_tup DESC;
    """
    cur = handle.cursor()
    try:
        cur.execute(query)
        res = cur.fetchall()
        handle.commit()
    finally:
        # Fix: release cursor and connection even when the query raises
        # (the original had no cleanup on the error path).
        cur.close()
        handle.close()
    return res
================================================
FILE: Postgresql/legos/postgresql_get_server_status/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_get_server_status/__init__.py
================================================
================================================
FILE: Postgresql/legos/postgresql_get_server_status/postgresql_get_server_status.json
================================================
{
"action_title": "PostgreSQL get service status",
"action_description": "This action checks the status of each database.",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_get_server_status",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_POSTGRESQL"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Postgresql/legos/postgresql_get_server_status/postgresql_get_server_status.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from typing import Tuple
from pydantic import BaseModel
class InputSchema(BaseModel):
    # This action takes no user inputs.
    pass
def postgresql_get_server_status_printer(output):
    """Print whether the PostgreSQL server is reachable."""
    ok, err = output
    if ok:
        print("PostgreSQL Server Status: Reachable")
        return
    error_message = err['message'] if err else "Unknown error"
    print("PostgreSQL Server Status: Unreachable")
    print(f"Error: {error_message}")
def postgresql_get_server_status(handle) -> Tuple:
    """
    Returns a simple status indicating the reachability of the PostgreSQL server.

    :type handle: object
    :param handle: PostgreSQL connection object

    :return: Tuple of (True, None) on success, or (False, {"message": ...})
        when the round-trip query fails.
    """
    cur = None
    try:
        cur = handle.cursor()
        # A trivial round-trip query proves the server is reachable.
        cur.execute("SELECT 1;")
        cur.fetchone()
        return (True, None)
    except Exception as e:
        return (False, {"message": str(e)})
    finally:
        # Fix: close the cursor too (the original leaked it) before closing
        # the connection; guard against cursor() itself having failed.
        if cur is not None:
            try:
                cur.close()
            except Exception:
                pass
        handle.close()
================================================
FILE: Postgresql/legos/postgresql_handling_transaction/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_handling_transaction/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Postgresql/legos/postgresql_handling_transaction/postgresql_handling_transaction.json
================================================
{
"action_title": "Execute commands in a PostgreSQL transaction.",
"action_description": "Given a set of PostgreSQL commands, this actions run them inside a transaction.",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_handling_transaction",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_POSTGRESQL"]
}
================================================
FILE: Postgresql/legos/postgresql_handling_transaction/postgresql_handling_transaction.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
import psycopg2
from pydantic import BaseModel, Field
# Shared pretty-printer used by this action's console output.
pp = pprint.PrettyPrinter(indent=2)
class InputSchema(BaseModel):
    # Semicolon-separated SQL statements executed inside a single transaction.
    transaction: str = Field(
        title='Commands',
        description='''
        PostgreSQL commands to be run inside a transaction. The commands should be ; separated. For eg:
        UPDATE test SET name = 'test-update3' WHERE _id = 3;
        UPDATE test SET name = 'test-update3' WHERE _id = 4;
        ''')
def postgresql_handling_transaction(handle, transaction:str):
    """postgresql_handling_transactions Runs postgres query with the provided parameters.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type transaction: str
    :param transaction: PostgreSQL commands to be run inside a transaction.

    :rtype: Transaction Success message. Error if failed.
    """
    # Wrap the user's statements in an explicit BEGIN/COMMIT pair.
    command = "\n".join(("BEGIN;", transaction, "COMMIT;"))
    try:
        cur = handle.cursor()
        cur.execute(command)
        # Close communication with the PostgreSQL database server.
        cur.close()
        # Commit the changes.
        handle.commit()
        pp.pprint("Transaction completed successfully ")
    except (Exception, psycopg2.DatabaseError) as error:
        pp.pprint(f"Error in transaction Reverting all other operations of a transactions {error}")
        handle.rollback()
    finally:
        if handle:
            handle.close()
            pp.pprint("PostgreSQL connection is closed")
================================================
FILE: Postgresql/legos/postgresql_long_running_queries/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_long_running_queries/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Postgresql/legos/postgresql_long_running_queries/postgresql_long_running_queries.json
================================================
{
"action_title": "Long Running PostgreSQL Queries",
"action_description": "Long Running PostgreSQL Queries",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_long_running_queries",
"action_needs_credential": true,
"action_is_check": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_POSTGRESQL","CATEGORY_TYPE_POSTGRESQL_QUERY"],
"action_next_hop": ["adcf88e8035c594e599fc9a33c28c9099187211f6daccb9d3ab4e5d17993086f"],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Postgresql/legos/postgresql_long_running_queries/postgresql_long_running_queries.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional, Tuple
from tabulate import tabulate
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Queries running longer than this many seconds are reported.
    interval: Optional[int] = Field(
        default=5,
        title='Interval (in seconds)',
        description='Return queries running longer than interval')
def postgresql_long_running_queries_printer(output):
    """Pretty-print long-running query results; prints nothing for None."""
    if output is not None:
        pprint.pprint(output)
def postgresql_long_running_queries(handle, interval: int = 5) -> Tuple:
    """postgresql_long_running_queries Runs postgres query with the provided parameters.
    :type handle: object
    :param handle: Object returned from task.validate(...).
    :type interval: int
    :param interval: Interval (in seconds).
    :rtype: All the results of the query.
    """
    # Input param validation.
    # Multi-line will create an issue when we package the Legos.
    # Hence concatinating it into a single line.
    # Selects active queries whose runtime exceeds `interval` seconds.
    query = "SELECT pid, user, pg_stat_activity.query_start, now() - " \
        "pg_stat_activity.query_start AS query_time, query, state " \
        " FROM pg_stat_activity WHERE state = 'active' AND (now() - " \
        f"pg_stat_activity.query_start) > interval '{interval} seconds';"
    cur = handle.cursor()
    cur.execute(query)
    output = []
    res = cur.fetchall()
    data = []
    # Build both a structured per-query dict list (output) and plain rows (data)
    # for the tabulated display below.
    for records in res:
        result = {
            "pid": records[0],
            "user": records[1],
            "query_start": records[2],
            "query_time": records[3],
            "query": records[4],
            "state": records[5]
        }
        output.append(result)
        data.append([records[0], records[4], records[5], records[3]])
    if len(res) > 0:
        headers = ["pid", "query", "state", "duration"]
        print("\n")
        # NOTE(review): this overwrites the list of dicts built above with a
        # tabulate-rendered string, so when rows exist the caller receives a
        # string even though the action metadata advertises a LIST output —
        # confirm whether the structured list should be returned instead.
        output = tabulate(data, headers=headers, tablefmt="grid")
    handle.commit()
    cur.close()
    handle.close()
    # Non-empty output (rows found) means the check failed.
    if len(output) != 0:
        return (False, output)
    return (True, None)
================================================
FILE: Postgresql/legos/postgresql_read_query/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_read_query/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Postgresql/legos/postgresql_read_query/postgresql_read_query.json
================================================
{
"action_title": "Read PostgreSQL Query",
"action_description": "Read PostgreSQL Query",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_read_query",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_POSTGRESQL","CATEGORY_TYPE_POSTGRESQL_QUERY"]
}
================================================
FILE: Postgresql/legos/postgresql_read_query/postgresql_read_query.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import random
import string
from typing import List
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    # PREPARE-style query text with $1, $2, ... placeholders.
    query: str = Field(
        title='Read Query',
        description='''
        Read query in Postgresql PREPARE statement format. For eg.
        SELECT foo FROM table WHERE bar=$1 AND customer=$2.
        The values for $1 and $2 should be passed in the params field as a tuple.
        ''')
    # Positional values substituted for the $n placeholders, in order.
    params: List = Field(
        None,
        title='Parameters',
        description='Parameters to the query in list format. For eg: [1, 2, "abc"]')
def postgresql_read_query_printer(output):
    """Print query result rows as a grid table and pass the output through.

    Fix: the original appended a generator object per row
    (`data.append(record for record in records)`); materialize each row as a
    list so tabulate always receives concrete row sequences.
    """
    print("\n")
    data = [list(records) for records in output]
    print(tabulate(data, tablefmt="grid"))
    return output
def postgresql_read_query(handle, query: str, params: list = ()) -> List:
    """postgresql_read_query runs a PostgreSQL read query via PREPARE/EXECUTE.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type query: str
    :param query: PostgreSQL read query in PREPARE statement format
        (placeholders $1, $2, ...).

    :type params: list
    :param params: Values for the $n placeholders, in order.

    :rtype: List of rows returned by the query.
    """
    cur = handle.cursor()
    # A random statement name avoids collisions with other prepared statements
    # on the same session.
    random_id = ''.join(
        [random.choice(string.ascii_letters + string.digits) for n in range(32)])
    prepare_stmt = f"PREPARE psycop_{random_id} AS {query};"
    if not params:
        prepared_query = f"EXECUTE psycop_{random_id};"
    else:
        parameters_tuple = tuple(params)
        if len(parameters_tuple) == 1:
            # repr of a 1-tuple carries a trailing comma -- "(x,)" -- which is
            # invalid EXECUTE syntax; strip it to get "(x)".
            tuple_string = str(parameters_tuple)
            argument_list = tuple_string[:-2] + tuple_string[-1]
        else:
            argument_list = str(parameters_tuple)
        # Fix: the original interpolated the raw list `params` (rendering as
        # "[1, 2]", invalid EXECUTE syntax) and discarded the tuple string it
        # had just built; use the tuple form "(1, 2)" instead.
        prepared_query = f"EXECUTE psycop_{random_id} {argument_list};"
    cur.execute(prepare_stmt)
    cur.execute(prepared_query)
    res = cur.fetchall()
    cur.close()
    return res
================================================
FILE: Postgresql/legos/postgresql_show_tables/README.md
================================================
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_show_tables/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Postgresql/legos/postgresql_show_tables/postgresql_show_tables.json
================================================
{
"action_title": "Show tables in PostgreSQL Database",
"action_description": "Show the tables existing in a PostgreSQL Database. We execute the following query to fetch this information SELECT * FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema';",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_show_tables",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_POSTGRESQL","CATEGORY_TYPE_POSTGRESQL_TABLE"]
}
================================================
FILE: Postgresql/legos/postgresql_show_tables/postgresql_show_tables.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from typing import List
from pydantic import BaseModel
from tabulate import tabulate
from unskript.legos.postgresql.postgresql_read_query.postgresql_read_query import postgresql_read_query
class InputSchema(BaseModel):
    """Input schema for the postgresql_show_tables action (no inputs required)."""
    pass
def postgresql_show_tables_printer(output):
    """Render the rows returned by postgresql_show_tables as a grid table.

    :type output: list
    :param output: List of row tuples from pg_catalog.pg_tables.
    """
    print("\n")
    data = []
    for records in output:
        # Materialize each row as a list. The previous code appended a
        # generator object, which tabulate rendered as "<generator ...>"
        # instead of the row's column values.
        data.append([record for record in records])
    print(tabulate(data, tablefmt="grid"))
    return output
def postgresql_show_tables(handle) -> List:
    """postgresql_show_tables gives list of tables.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: List of tables.
    """
    # List user tables only: the pg_catalog and information_schema system
    # schemas are excluded.
    query = ("SELECT * FROM pg_catalog.pg_tables WHERE schemaname != "
             "'pg_catalog' AND schemaname != 'information_schema';")
    return postgresql_read_query(handle, query, ())
================================================
FILE: Postgresql/legos/postgresql_stored_procedures/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Postgresql/legos/postgresql_stored_procedures/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Postgresql/legos/postgresql_stored_procedures/postgresql_stored_procedures.json
================================================
{
"action_title": "Call PostgreSQL Stored Procedure",
"action_description": "Call PostgreSQL Stored Procedure",
"action_type": "LEGO_TYPE_POSTGRESQL",
"action_entry_function": "postgresql_stored_procedures",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_POSTGRESQL","CATEGORY_TYPE_POSTGRESQL_QUERY"]
}
================================================
FILE: Postgresql/legos/postgresql_stored_procedures/postgresql_stored_procedures.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from typing import Any, List
import psycopg2
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the Call PostgreSQL Stored Procedure action."""
    # Name of the stored procedure to CALL.
    stored_procedure_name: str = Field(
        title='Stored procedure name.',
        description='PostgreSQL stored procedure name.')
    # Optional positional arguments passed to the procedure.
    params: list = Field(
        None,
        title='Parameters',
        description='Parameters to the Stored Procedure in list format. For eg: [1, 2]')
def postgresql_stored_procedures(handle, stored_procedure_name: str, params: List = None):
    """postgresql_stored_procedures calls a PostgreSQL stored procedure with the
    provided parameters.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type stored_procedure_name: str
    :param stored_procedure_name: PostgreSQL stored procedure name.

    :type params: List
    :param params: Parameters to the Stored Procedure in list format.

    :rtype: None. Prints the outcome of the CALL.
    """
    # NOTE: the previous default was the typing object List[Any], which is
    # truthy, so calls without params took the parameterized branch and
    # failed. None is the correct "no parameters" sentinel.
    try:
        cur = handle.cursor()
        if params:
            # CALL requires a parenthesized argument list; build one %s
            # placeholder per parameter so psycopg2 binds values safely.
            placeholders = ", ".join(["%s"] * len(params))
            query = f"CALL {stored_procedure_name}({placeholders})"
            cur.execute(query, params)
        else:
            # A procedure without arguments is still invoked with empty parens.
            query = f"CALL {stored_procedure_name}()"
            cur.execute(query)
        # commit the transaction
        handle.commit()
        # Close communication with the PostgreSQL database
        cur.close()
        print("Call PostgreSQL Stored Procedures successfully")
    except (Exception, psycopg2.DatabaseError) as error:
        print(f"Error : {error}")
    finally:
        if handle:
            handle.close()
            print("PostgreSQL connection is closed")
================================================
FILE: Postgresql/legos/postgresql_write_query/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Prometheus/legos/prometheus_alerts_list/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Prometheus/legos/prometheus_alerts_list/prometheus_alerts_list.json
================================================
{
"action_title": "Get Prometheus rules",
"action_description": "Get Prometheus rules",
"action_type": "LEGO_TYPE_PROMETHEUS",
"action_entry_function": "prometheus_alerts_list",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PROMETHEUS"],
"action_supports_iteration": true
}
================================================
FILE: Prometheus/legos/prometheus_alerts_list/prometheus_alerts_list.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List
from tabulate import tabulate
from pydantic import BaseModel
# Lego metadata consumed by the unSkript framework (title/description/type).
lego_title="Get All Prometheus Alerts"
lego_description="Get All Prometheus Alerts"
lego_type="LEGO_TYPE_PROMETHEUS"
class InputSchema(BaseModel):
    """Input schema for the prometheus_alerts_list action (no inputs required)."""
    pass
def prometheus_alerts_list_printer(output):
    """Print every alert's key/value pairs as tabulated rows."""
    if output is None:
        return
    # Flatten each alert dict into [key, value] rows for tabulate.
    rows = [[field, field_value] for alert in output for field, field_value in alert.items()]
    print("\n")
    print(tabulate(rows))
def prometheus_alerts_list(handle) -> List[dict]:
    """prometheus_alerts_list Returns all alerts.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :return: Alerts list.
    """
    try:
        # Restrict the rules query to alerting rules only.
        response = handle.all_alerts({"type": "alert"})
    except Exception as e:
        print(f'Alerts failed, {str(e)}')
        return [{"error": str(e)}]
    groups = response['groups']
    if not groups:
        return []
    # Collect the name of every rule in every group.
    return [{'name': rule['name']} for group in groups for rule in group['rules']]
================================================
FILE: Prometheus/legos/prometheus_get_all_metrics/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Prometheus/legos/prometheus_get_all_metrics/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Prometheus/legos/prometheus_get_all_metrics/prometheus_get_all_metrics.json
================================================
{
"action_title": "Get All Prometheus Metrics",
"action_description": "Get All Prometheus Metrics",
"action_type": "LEGO_TYPE_PROMETHEUS",
"action_entry_function": "prometheus_get_all_metrics",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PROMETHEUS"]
}
================================================
FILE: Prometheus/legos/prometheus_get_all_metrics/prometheus_get_all_metrics.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List
from pydantic import BaseModel
class InputSchema(BaseModel):
    """Input schema for the prometheus_get_all_metrics action (no inputs required)."""
    pass
def prometheus_get_all_metrics_printer(output):
    """Print each metric name from the output list on its own line."""
    if output is None:
        return
    for metric_name in output:
        print(metric_name)
def prometheus_get_all_metrics(handle) -> List:
    """prometheus_get_all_metrics Returns Prometheus Metrics.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :return: Metrics list.
    """
    # Delegate to the prometheus client handle's metric listing endpoint.
    metrics = handle.all_metrics()
    return metrics
================================================
FILE: Prometheus/legos/prometheus_get_handle/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Prometheus/legos/prometheus_get_handle/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Prometheus/legos/prometheus_get_handle/prometheus_get_handle.json
================================================
{
"action_title": "Get Prometheus handle",
"action_description": "Get Prometheus handle",
"action_type": "LEGO_TYPE_PROMETHEUS",
"action_entry_function": "prometheus_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_supports_iteration": false
}
================================================
FILE: Prometheus/legos/prometheus_get_handle/prometheus_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    """Input schema for the prometheus_get_handle action (no inputs required)."""
    pass
def prometheus_get_handle(handle):
    """prometheus_get_handle returns the prometheus api connection handle.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: prometheus Handle.
    """
    # Identity passthrough: the validated handle is returned unchanged so
    # downstream actions can reuse the same connection.
    return handle
================================================
FILE: Prometheus/legos/prometheus_get_metric_statistics/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Prometheus/legos/prometheus_get_metric_statistics/prometheus_get_metric_statistics.json
================================================
{
"action_title": "Get Prometheus Metric Statistics",
"action_description": "Get Prometheus Metric Statistics",
"action_type": "LEGO_TYPE_PROMETHEUS",
"action_entry_function": "prometheus_get_metric_range_data",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_PROMETHEUS"]
}
================================================
FILE: Prometheus/legos/prometheus_get_metric_statistics/prometheus_get_metric_statistics.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    """Inputs for the Get Prometheus Metric Statistics action."""
    # PromQL expression evaluated over the lookback window.
    promql_query: str = Field(
        title="PromQL Query",
        description="This is a PromQL query, a few examples can be found at \
https://prometheus.io/docs/prometheus/latest/querying/examples/"
    )
    # Lookback window, in seconds, ending at "now".
    timeSince: int = Field(
        title="Time Since",
        description="Starting from now, window (in seconds) \
for which you want to get the datapoints for.",
    )
    # Passed straight through to the Prometheus range-query API.
    step: str = Field(
        title="Step",
        description="Query resolution step width in duration format or float number of seconds.",
    )
    # Figure size forwarded to matplotlib's figsize.
    graph_size: list = Field(
        default=[16, 8],
        title="Graph Size",
        description="Size of the graph in inches (width, height), specified as a list.",
    )
def prometheus_get_metric_range_data_printer(output):
    """Display the plotted figure, then pretty-print the tabulated output."""
    if output is not None:
        plt.show()
        pprint.pprint(output)
def prometheus_get_metric_range_data(
        handle,
        promql_query: str,
        timeSince: int,
        step: str,
        graph_size: list = None
) -> str:
    """prometheus_get_metric_range_data plots the values of a PromQL range
    query and returns the datapoints as a grid-formatted table string.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type promql_query: string
    :param promql_query: This is a PromQL query, a few examples can be found at
        https://prometheus.io/docs/prometheus/latest/querying/examples/

    :type timeSince: int
    :param timeSince: Starting from now, window (in seconds) for which you want
        to get the metric values for.

    :type step: string
    :param step: Query resolution step width in duration format or float number
        of seconds.

    :type graph_size: list
    :param graph_size: Size of the graph in inches (width, height), specified
        as a list. Defaults to [16, 8].

    :rtype: String table of (timestamp, value) rows; the plot itself is shown
        by the printer via plt.show().
    """
    # Avoid the mutable-default-argument pitfall: resolve the documented
    # default inside the call instead of in the signature.
    if graph_size is None:
        graph_size = [16, 8]
    result = handle.custom_query_range(
        query=promql_query,
        start_time=datetime.utcnow() - timedelta(seconds=timeSince),
        end_time=datetime.utcnow(),
        step=step)
    data = []
    table_data = []
    plt.figure(figsize=graph_size)
    for each_result in result:
        metric_data = {}
        # Each series carries [unix_timestamp, value] pairs under "values".
        for each_metric_value in each_result["values"]:
            metric_data[datetime.fromtimestamp(each_metric_value[0])] = each_metric_value[1]
        data.append(metric_data)
    for metric_values in data:
        data_keys = metric_values.keys()
        times_stamps = list(data_keys)
        times_stamps.sort()
        sorted_values = []
        for time in times_stamps:
            table_data.append([time, metric_values[time]])
            sorted_values.append(metric_values[time])
        # One plotted line per series, points joined in timestamp order.
        plt.plot_date(times_stamps, sorted_values, "-o")
    plt.autoscale(enable=True, axis='both', tight=None)  # Enable autoscaling
    plt.xlabel("Time")
    plt.ylabel("Value")
    plt.title(promql_query)
    plt.grid(True)
    head = ["Timestamp", "Value"]
    table = tabulate(table_data, headers=head, tablefmt="grid")
    return table
================================================
FILE: README.md
================================================
[![Contributors][contributors-shield]][contributors-url]
[![Forks][forks-shield]][forks-url]
[![Stargazers][stars-shield]][stars-url]
[![Issues][issues-shield]][issues-url]
[![Twitter][twitter-shield]][twitter-url]
![Actions][actions-shield]
![Runbooks][runbooks-shield]
# Runbooks.sh
### Empowering Cloud Automation, Together
**[Explore our docs](https://docs.unskript.com)**
*[Visit our blog](https://unskript.com/blog)* . *[Report Bug](https://github.com/unskript/Awesome-CloudOps-Automation/issues/new?assignees=&labels=&template=bug_report.md&title=)* . *[Request Feature](https://github.com/unskript/Awesome-CloudOps-Automation/issues/new?assignees=&labels=&template=feature_request.md&title=)*
# 🚀 Quick Start Guide
We recommend using our docker setup which comes with Jupyter runtime along with pre-built [actions](https://docs.unskript.com/unskript-product-documentation/actions/what-is-an-action) and [runbooks](https://docs.unskript.com/unskript-product-documentation/readme/what-is-a-runbook). Build your own actions and runbooks with ease!
## Get Started
1. Launch Docker
```
docker run -it -p 8888:8888 --user root unskript/awesome-runbooks:latest
```
2. Point your browser to http://127.0.0.1:8888/awesome.
## Advanced Usage
In this section, we'll explore advanced configurations that enable:
1. Custom Action and Runbook creation
2. Custom Action creation using OpenAI integration
### Custom Action and Runbook Creation
1. Clone this repository to your local machine.
```bash
git clone https://github.com/unskript/Awesome-CloudOps-Automation
cd Awesome-CloudOps-Automation
```
2. Launch Docker
- Use this command to create custom runbooks and actions. (update the first -v line if you used a different directory in step 1).
```bash
docker run -it -p 8888:8888 \
-v $HOME/Awesome-CloudOps-Automation/custom:/unskript/data \
-v $HOME/.unskript:/unskript/credentials \
-e ACA_AWESOME_MODE=1 \
--user root \
docker.io/unskript/awesome-runbooks:latest
```
3. Point your browser to http://127.0.0.1:8888/awesome.
### Custom Action Creation using OpenAI Integration
1. Clone this repository to your local machine if you haven't already.
```bash
git clone https://github.com/unskript/Awesome-CloudOps-Automation
cd Awesome-CloudOps-Automation
```
2. Launch Docker with OpenAI parameters:
- Use this command to create custom GenAI actions (update the first -v line if you used a different directory in step 1).
```bash
docker run -it -p 8888:8888 \
-v $HOME/Awesome-CloudOps-Automation/actions:/unskript/data/actions \
-v $HOME/Awesome-CloudOps-Automation/runbooks:/unskript/data/runbooks \
-v $HOME/.unskript:/unskript/credentials \
-e ACA_AWESOME_MODE=1 \
-e OPENAI_ORGANIZATION_ID=
================================================
FILE: Redis/legos/redis_delete_all_keys/__init__.py
================================================
================================================
FILE: Redis/legos/redis_delete_all_keys/redis_delete_all_keys.json
================================================
{
"action_title": "Delete All Redis Keys",
"action_description": "Delete All Redis keys",
"action_entry_function": "redis_delete_all_keys",
"action_type": "LEGO_TYPE_REDIS",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["delete"],
"action_nouns": ["all","keys","redis"],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_REDIS"]
}
================================================
FILE: Redis/legos/redis_delete_all_keys/redis_delete_all_keys.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
import pprint
from typing import List
from pydantic import BaseModel
class InputSchema(BaseModel):
    """Input schema for the redis_delete_all_keys action (no inputs required)."""
    pass
def redis_delete_all_keys_printer(output):
    """Pretty-print the keys removed by redis_delete_all_keys."""
    if output is not None:
        pprint.pprint("Deleted Keys: ")
        pprint.pprint(output)
def redis_delete_all_keys(handle) -> List:
    """redis_delete_all_keys deletes every key in the database.

    (The previous docstring said "pattern matched keys" — a copy-paste from
    redis_delete_keys; this action always scans with the '*' pattern.)

    :type handle: object
    :param handle: Redis connection object.

    :rtype: List of all deleted keys.
    """
    result = []
    try:
        # SCAN-based iteration avoids blocking the server the way KEYS * can.
        for key in handle.scan_iter('*'):
            result.append(key)
            handle.delete(key)
    except Exception as e:
        # Best-effort: report the failure but still return the keys deleted
        # before it occurred.
        print(e)
    return result
================================================
FILE: Redis/legos/redis_delete_keys/README.md
================================================
[
================================================
FILE: Redis/legos/redis_delete_keys/__init__.py
================================================
================================================
FILE: Redis/legos/redis_delete_keys/redis_delete_keys.json
================================================
{
"action_title": "Delete Redis Keys",
"action_description": "Delete Redis keys matching pattern",
"action_entry_function": "redis_delete_keys",
"action_type": "LEGO_TYPE_REDIS",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["delete"],
"action_nouns": ["pattern","keys","redis"],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_REDIS"]
}
================================================
FILE: Redis/legos/redis_delete_keys/redis_delete_keys.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the Delete Redis Keys action."""
    # Glob-style pattern forwarded to SCAN.
    pattern: str = Field(
        title='Pattern',
        description='Pattern for the searched keys')
def redis_delete_keys_printer(output):
    """Pretty-print the keys removed by redis_delete_keys."""
    if output is not None:
        pprint.pprint("Deleted Keys: ")
        pprint.pprint(output)
def redis_delete_keys(handle, pattern: str) -> List:
    """redis_delete_keys deleted the pattern matched keys.

    :type pattern: string
    :param pattern: Pattern for the searched keys.

    :rtype: List of deleted keys.
    """
    deleted = []
    try:
        # Walk the keyspace with SCAN so the server is never blocked.
        for matched_key in handle.scan_iter(pattern):
            deleted.append(matched_key)
            handle.delete(matched_key)
    except Exception as e:
        print(e)
    return deleted
================================================
FILE: Redis/legos/redis_delete_stale_keys/README.md
================================================
[
================================================
FILE: Redis/legos/redis_delete_stale_keys/__init__.py
================================================
================================================
FILE: Redis/legos/redis_delete_stale_keys/redis_delete_stale_keys.json
================================================
{
"action_title": "Delete Redis Unused keys",
"action_description": "Delete Redis Unused keys given a time threshold in seconds",
"action_entry_function": "redis_delete_stale_keys",
"action_type": "LEGO_TYPE_REDIS",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["delete"],
"action_nouns": ["stale","keys","redis"],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_REDIS"]
}
================================================
FILE: Redis/legos/redis_delete_stale_keys/redis_delete_stale_keys.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the Delete Redis Unused Keys action."""
    # Keys idle longer than this many seconds are deleted.
    time_in_sec: int = Field(
        title='Time in Seconds',
        description='Threshold Idle Time in Seconds')
def redis_delete_stale_keys_printer(output):
    """Print the stale keys removed by redis_delete_stale_keys with their idle times."""
    if output is not None:
        print("Deleted Keys: ")
        pprint.pprint(output)
def redis_delete_stale_keys(handle, time_in_sec: int) -> Dict:
    """redis_delete_stale_keys deletes keys whose idle time exceeds a threshold.

    :type handle: object
    :param handle: Redis connection object.

    :type time_in_sec: int
    :param time_in_sec: Threshold Idle Time in Seconds.

    :rtype: Dict mapping each deleted key to its idle time. On failure, an
        "error" entry carries the exception message.
    """
    result = {}
    try:
        for key in handle.scan_iter("*"):
            # OBJECT IDLETIME: seconds since the key was last read or written.
            idle_time = handle.object("idletime", key)
            if idle_time > time_in_sec:
                result[key] = idle_time
                handle.delete(key)
    except Exception as e:
        # Store the message, not the exception object, so the returned dict
        # stays printable/serializable (the original stored `e` itself).
        result["error"] = str(e)
    return result
================================================
FILE: Redis/legos/redis_get_cluster_health/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Redis/legos/redis_get_cluster_health/__init__.py
================================================
================================================
FILE: Redis/legos/redis_get_cluster_health/redis_get_cluster_health.json
================================================
{
"action_title": "Get Redis cluster health",
"action_description": "This action gets the Redis cluster health.",
"action_type": "LEGO_TYPE_REDIS",
"action_entry_function": "redis_get_cluster_health",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_REDIS"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Redis/legos/redis_get_cluster_health/redis_get_cluster_health.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from typing import Optional, Tuple
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the Get Redis Cluster Health check."""
    # Connected-client count above which the instance is flagged abnormal.
    client_threshold: Optional[int] = Field(
        10000,
        title='Client threshold',
        description='Threshold for the number of connected clients considered abnormal. Default- 10000 clients')
    # Memory-usage percentage above which the instance is flagged abnormal.
    memory_threshold: Optional[int] = Field(
        80,
        title='Memory threshold (in %)',
        description='Threshold for the percentage of memory usage considered abnormal. Default- 80%')
def redis_get_cluster_health_printer(output):
    """Pretty-print the (status, analysis) tuple from redis_get_cluster_health."""
    if output is None or not output[1]:
        print("No health information available.")
        return
    is_healthy, analysis = output
    print("\nRedis Health Info:")
    print("Status: Healthy" if is_healthy else "Status: Unhealthy")
    # Plain metrics first; the abnormal list gets its own section below.
    for metric_name, metric_value in analysis.items():
        if metric_name != 'abnormal_metrics':
            print(f"{metric_name}: {metric_value}")
    if 'abnormal_metrics' in analysis:
        print("\nAbnormal Metrics Detected:")
        for metric, message in analysis['abnormal_metrics']:
            print(f"{metric}: {message}")
def redis_get_cluster_health(handle, client_threshold: int = 10000, memory_threshold: int = 80) -> Tuple:
    """Returns the health of the Redis instance.

    :type handle: object
    :param handle: Redis connection object

    :type client_threshold: int
    :param client_threshold: Threshold for the number of connected clients considered abnormal

    :type memory_threshold: int
    :param memory_threshold: Threshold for the percentage of memory usage considered abnormal

    :rtype: Tuple containing a boolean indicating overall health and a dictionary with detailed information
    """
    # Metrics that need to be checked
    health_metrics = [
        'uptime_in_seconds',
        'connected_clients',
        'used_memory',
        'maxmemory',
        'rdb_last_bgsave_status',
        'aof_last_bgrewrite_status',
        'aof_last_write_status',
    ]
    health_info = {}
    abnormal_metrics = []
    # The pointless `except Exception as e: raise e` wrapper was removed: it
    # added no handling and only obscured the original traceback.
    general_info = handle.info()
    if not isinstance(general_info, dict):
        raise Exception("Unexpected format for general info")
    # Iterate through the health metrics to check for specific keys
    for key in health_metrics:
        value = general_info.get(key)
        if value is None:
            continue
        health_info[key] = value
        # Check if connected clients exceed the threshold
        if key == 'connected_clients' and int(value) > client_threshold:
            abnormal_metrics.append((key, f"High number of connected clients: {value}"))
        # Check if memory usage exceeds the threshold (only when maxmemory is set)
        if key == 'used_memory' and general_info.get('maxmemory') and int(value) / int(general_info['maxmemory']) * 100 > memory_threshold:
            abnormal_metrics.append((key, f"Memory utilization is above {memory_threshold}%: {value}"))
        # Check for abnormal statuses
        if key in ['rdb_last_bgsave_status', 'aof_last_bgrewrite_status', 'aof_last_write_status'] and value != 'ok':
            abnormal_metrics.append((key, f"Status not OK: {value}"))
    # Append abnormal metrics if any are found
    if abnormal_metrics:
        health_info['abnormal_metrics'] = abnormal_metrics
        return (False, health_info)
    return (True, health_info)
================================================
FILE: Redis/legos/redis_get_handle/README.md
================================================
[
================================================
FILE: Redis/legos/redis_get_keys_count/__init__.py
================================================
================================================
FILE: Redis/legos/redis_get_keys_count/redis_get_keys_count.json
================================================
{
"action_title": "Get Redis keys count",
"action_description": "Get Redis keys count matching pattern (default: '*')",
"action_entry_function": "redis_get_keys_count",
"action_type": "LEGO_TYPE_REDIS",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_INT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_verbs": ["get"],
"action_nouns": ["count","keys","redis"],
"action_categories": ["CATEGORY_TYPE_INFORMATION" , "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_REDIS"]
}
================================================
FILE: Redis/legos/redis_get_keys_count/redis_get_keys_count.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
import pprint
from beartype import beartype
from pydantic import BaseModel, Field
from typing import Optional
class InputSchema(BaseModel):
    """Inputs for the Get Redis Keys Count action."""
    # Glob-style pattern forwarded to SCAN; '*' counts every key.
    pattern: Optional[str] = Field(
        default='*',
        title='Pattern',
        description='Pattern for the searched keys')
@beartype
def redis_get_keys_count_printer(output):
    """Pretty-print the matched-keys count from redis_get_keys_count."""
    if output is None:
        return
    summary = {"Matched keys count": output}
    pprint.pprint(summary)
@beartype
def redis_get_keys_count(handle, pattern: str="*"):
    """redis_get_keys_count returns the matched keys count.

    :type pattern: string
    :param pattern: Pattern for the searched keys.

    :rtype: Matched keys count.
    """
    # Count matches lazily without materializing the key list in memory.
    return sum(1 for _ in handle.scan_iter(pattern))
================================================
FILE: Redis/legos/redis_get_metrics/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Redis/legos/redis_get_metrics/__init__.py
================================================
================================================
FILE: Redis/legos/redis_get_metrics/redis_get_metrics.json
================================================
{
"action_title": "Get Redis metrics",
"action_description": "This action fetched redis metrics like index size, memory utilization.",
"action_type": "LEGO_TYPE_REDIS",
"action_entry_function": "redis_get_metrics",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": ["CATEGORY_TYPE_INFORMATION" , "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_REDIS"]
}
================================================
FILE: Redis/legos/redis_get_metrics/redis_get_metrics.py
================================================
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
from typing import Dict
from pydantic import BaseModel
from tabulate import tabulate
class InputSchema(BaseModel):
    """Input schema for the redis_get_metrics action (no inputs required)."""
    pass
def redis_get_metrics_printer(output):
    """Render the metrics dict from redis_get_metrics as a two-column table."""
    if output is None:
        return
    print("\nRedis Metrics: ")
    rows = [(metric, value) for metric, value in output.items()]
    print(tabulate(rows, ["Metric", "Value"], tablefmt="pretty"))
def bytes_to_human_readable(bytes, units=(' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')):
    # Return a human-readable string representation of bytes.
    # Recursion shifts right by 10 bits (integer-divides by 1024) and consumes
    # one unit label per step until the value fits under 1024.
    # NOTE: `bytes` shadows the builtin but is kept for backward compatibility
    # with keyword callers. The default is now a tuple instead of a list to
    # avoid the mutable-default-argument pitfall.
    return str(bytes) + units[0] if bytes < 1024 else bytes_to_human_readable(bytes >> 10, units[1:])
def redis_get_metrics(handle) -> Dict:
    """
    redis_get_metrics returns redis metrics like index size, memory utilization.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: Dict containing index size and memory usage metrics
    """
    metrics = {}
    try:
        # Getting the information about the Redis server
        info = handle.info()
        # Sum key counts across all logical databases (db0, db1, ...).
        total_keys = 0
        for key in info:
            if key.startswith('db'):
                total_keys += info[key]['keys']
        metrics['index_size'] = total_keys  # Total number of keys.
        metrics['memory_utilization'] = bytes_to_human_readable(int(info['used_memory']))
        metrics['dataset_size'] = bytes_to_human_readable(int(info['used_memory_dataset']))
    except Exception:
        # Bare re-raise preserves the original traceback (the previous
        # `raise e` re-raised the bound name to no benefit).
        raise
    return metrics
================================================
FILE: Redis/legos/redis_list_large_keys/README.md
================================================
[
================================================
FILE: Redis/legos/redis_list_large_keys/__init__.py
================================================
================================================
FILE: Redis/legos/redis_list_large_keys/redis_list_large_keys.json
================================================
{
"action_title": " List Redis Large keys",
"action_description": "Find Redis Large keys given a size threshold in bytes",
"action_entry_function": "redis_list_large_keys",
"action_type": "LEGO_TYPE_REDIS",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_is_check": true,
"action_verbs": ["list"],
"action_nouns": ["large","keys","redis"],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_REDIS"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: Redis/legos/redis_list_large_keys/redis_list_large_keys.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
from typing import Optional, Tuple
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    """Inputs for the List Redis Large Keys check."""
    # Keys using more memory than this many bytes are reported.
    size_in_bytes: Optional[int] = Field(
        5368709120, # 5GB
        title='Size in Bytes',
        description='Threshold Size of Key in Bytes')
def redis_list_large_keys_printer(output):
    """Print the (status, data) tuple produced by redis_list_large_keys."""
    no_large_keys, data = output
    if no_large_keys:
        print("There are no large keys")
        return
    # Flatten each {key: size} entry into a [name, size] table row.
    rows = []
    for entry in data:
        for key_name, key_size in entry.items():
            rows.append([key_name.decode(), key_size])
    print("Large keys:")
    print(tabulate(rows, headers=["Key Name", "Key Size (Bytes)"], tablefmt="grid"))
def redis_list_large_keys(handle, size_in_bytes: int = 5368709120) -> Tuple:
    """redis_list_large_keys lists Redis keys whose memory usage exceeds a size threshold.

    :type handle: object
    :param handle: Redis client handle.

    :type size_in_bytes: int
    :param size_in_bytes: Threshold Size of Key in Bytes (default 5GB).

    :rtype: Tuple of (status, data). status is True with data None when no key
        exceeds the threshold; otherwise status is False and data is a list of
        {"large_key": <key name>, "value": <size in bytes>} dicts.
    """
    try:
        result = []
        # NOTE(review): KEYS * blocks the server while it scans the whole
        # keyspace; consider SCAN for very large datasets.
        keys = handle.keys('*')
        for key in keys:
            value = handle.memory_usage(key)
            # MEMORY USAGE returns nil (None) if the key disappeared between
            # the KEYS call and this lookup; skip instead of raising TypeError.
            if value is None:
                continue
            if value > int(size_in_bytes):
                large_key = {"large_key": key.decode('utf-8'), "value": value}
                result.append(large_key)
    except Exception as e:
        raise e
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: Rest/README.md
================================================
# Rest Actions
* [Get REST handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Rest/legos/rest_get_handle/README.md): Get REST handle
* [Call REST Methods](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Rest/legos/rest_methods/README.md): Call REST Methods.
================================================
FILE: Rest/__init__.py
================================================
================================================
FILE: Rest/legos/__init__.py
================================================
================================================
FILE: Rest/legos/rest_get_handle/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SSH/legos/ssh_execute_remote_command/__init__.py
================================================
================================================
FILE: SSH/legos/ssh_execute_remote_command/ssh_execute_remote_command.json
================================================
{
"action_title": "SSH Execute Remote Command",
"action_description": "SSH Execute Remote Command",
"action_type": "LEGO_TYPE_SSH",
"action_entry_function": "ssh_execute_remote_command",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_verbs": [
"execute"
],
"action_nouns": [
"ssh",
"command"
],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SSH"]
}
================================================
FILE: SSH/legos/ssh_execute_remote_command/ssh_execute_remote_command.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List, Optional, Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the SSH remote-command action."""
    # Target hosts for the command.
    hosts: List[str] = Field(
        title='Hosts',
        description='List of hosts to connect to. For eg. ["host1", "host2"].'
    )
    # Shell command executed verbatim on each host.
    command: str = Field(
        title='Command',
        description='Command to be executed on the remote server.'
    )
    # Elevate the command with sudo when True.
    sudo: Optional[bool] = Field(
        default=False,
        title='Run with sudo',
        description='Run the command with sudo.'
    )
    # Optional override of the credential-supplied proxy host.
    proxy_host: Optional[str] = Field(
        title='Proxy host',
        description='Override the proxy host provided in the credentials. \
It still uses the proxy_user and port from the credentials.'
    )
def ssh_execute_remote_command_printer(output):
    """Pretty-print the per-host command output map, if any."""
    if output is not None:
        print("\n")
        pprint.pprint(output)
def ssh_execute_remote_command(
        sshClient,
        hosts: List[str],
        command: str,
        sudo: bool = False,
        proxy_host: str = None
) -> Dict:
    """ssh_execute_remote_command executes the given command on the remote
       hosts and collects each host's stdout.

       :type hosts: List[str]
       :param hosts: List of hosts to connect to. For eg. ["host1", "host2"].

       :type command: str
       :param command: Command to be executed on the remote server.

       :type sudo: bool
       :param sudo: Run the command with sudo.

       :type proxy_host: str
       :param proxy_host: Optional proxy host to use.

       :rtype: dict mapping hostname to the command's stdout joined by newlines
    """
    client = sshClient(hosts, proxy_host)
    command_results = client.run_command(command=command, sudo=sudo)
    client.join()
    res = {}
    for host_output in command_results:
        # Each result exposes a host name and an iterable of stdout lines.
        res[host_output.host] = "\n".join(line for line in host_output.stdout)
    return res
================================================
FILE: SSH/legos/ssh_find_large_files/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SSH/legos/ssh_find_large_files/__init__.py
================================================
================================================
FILE: SSH/legos/ssh_find_large_files/ssh_find_large_files.json
================================================
{
"action_title": "SSH: Locate large files on host",
"action_description": "This action scans the file system on a given host and returns a dict of large files. The command used to perform the scan is \"find inspect_folder -type f -exec du -sk '{}' + | sort -rh | head -n count\"",
"action_type": "LEGO_TYPE_SSH",
"action_entry_function": "ssh_find_large_files",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_verbs": [
"find",
"locate"
],
"action_nouns": [
"ssh",
"files"
],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SSH"]
}
================================================
FILE: SSH/legos/ssh_find_large_files/ssh_find_large_files.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the find-large-files scan."""
    # Single target host (the function wraps it in a one-element list).
    host: str = Field(
        title='Host',
        description='Host to connect to. Eg 10.10.10.10'
    )
    # Optional override of the credential-supplied proxy host.
    proxy_host: Optional[str] = Field(
        title='Proxy host',
        description='Override the proxy host provided in the credentials. \
It still uses the proxy_user and port from the credentials.'
    )
    # Directory tree scanned with find + du.
    # NOTE(review): the description says "du -sk" but the function builds the
    # command with "du -sm" (sizes in Mb) — confirm which is intended.
    inspect_folder: str = Field(
        title='Inspect Folder',
        description='''Folder to inspect on the remote host. Folders are scanned using \
"find inspect_folder -type f -exec du -sk '{}' + | sort -rh | head -n count"'''
    )
    # NOTE(review): this default is 100, but the function signature defaults
    # threshold to 0 — confirm which default callers actually get.
    threshold: Optional[int] = Field(
        default=100,
        title="Size Threshold",
        description="Threshold the files on given size. Specified in Mb. Default is 100Mb"
    )
    # Cap on the number of files reported (head -n count).
    count: Optional[int] = Field(
        default=10,
        title="Count",
        description="Number of files to report from the scan. Default is 10"
    )
    # Run the scan with sudo when True.
    sudo: Optional[bool] = Field(
        default=False,
        title='Run with sudo',
        description='Run the scan with sudo.'
    )
def ssh_find_large_files_printer(output):
    """Pretty-print the filename-to-size map produced by the scan, if any."""
    if output is not None:
        print("\n")
        pprint.pprint(output)
def ssh_find_large_files(
        sshClient,
        host: str,
        inspect_folder: str,
        proxy_host: str = None,
        threshold: int = 0,
        sudo: bool = False,
        count: int = 10) -> dict:
    """ssh_find_large_files scans inspect_folder on a given host and returns
    the largest files found.

    :type host: str
    :param host: Host to connect to. Eg 10.10.10.10.
    :type inspect_folder: str
    :param inspect_folder: Folder to inspect on the remote host.
    :type proxy_host: str
    :param proxy_host: Proxy Host to connect host via. Eg 10.10.10.10.
    :type sudo: bool
    :param sudo: Run the scan with sudo.
    :type threshold: int
    :param threshold: Only report files strictly larger than this size in Mb.
        Default is 0 (report everything the scan returns).
    :type count: int
    :param count: Number of files to report from the scan. Default is 10.
    :rtype: dict mapping filename to its size in Mb
    """
    client = sshClient([host], proxy_host)
    # du -sm reports sizes in Mb; sort -rh orders largest-first and head
    # limits the report to `count` entries.
    command = "find " + inspect_folder + \
        " -type f -exec du -sm '{}' + | sort -rh | head -n " + str(count)
    runCommandOutput = client.run_command(command=command, sudo=sudo)
    client.join()
    res = {}
    for host_output in runCommandOutput:
        for line in host_output.stdout:
            # line is of the form "{size}<whitespace>{fullfilename}"; split only
            # on the first whitespace so filenames containing spaces survive.
            (size, filename) = line.split(None, 1)
            if int(size) > threshold:
                res[filename] = int(size)
    return res
================================================
FILE: SSH/legos/ssh_get_ec2_instances_with_low_available_disk_size/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SSH/legos/ssh_get_ec2_instances_with_low_available_disk_size/__init__.py
================================================
================================================
FILE: SSH/legos/ssh_get_ec2_instances_with_low_available_disk_size/ssh_get_ec2_instances_with_low_available_disk_size.json
================================================
{
"action_title": "Get AWS EC2 with low available disk size",
"action_description": "This action retrieves the public IP's of AWS EC2 instances that have low available disk space.",
"action_type": "LEGO_TYPE_SSH",
"action_entry_function": "ssh_get_ec2_instances_with_low_available_disk_size",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2", "CATEGORY_TYPE_SSH"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: SSH/legos/ssh_get_ec2_instances_with_low_available_disk_size/ssh_get_ec2_instances_with_low_available_disk_size.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from unskript.legos.ssh.ssh_execute_remote_command.ssh_execute_remote_command import ssh_execute_remote_command
class InputSchema(BaseModel):
    """Inputs for the low-available-disk-size check."""
    # Hosts whose root disk is inspected.
    hosts: list = Field(
        ...,
        description='List of hosts to connect to. For eg. ["host1", "host2"].',
        title='Hosts',
    )
    # Flag hosts whose available root-disk space falls below this many GB.
    threshold: Optional[float] = Field(
        default = 5, description='The disk size threshold in GB. Default- 5GB', title='Threshold(in GB)'
    )
def ssh_get_ec2_instances_with_low_available_disk_size_printer(output):
    """Pretty-print the (status, data) result tuple, if any."""
    if output is not None:
        pprint.pprint(output)
def ssh_get_ec2_instances_with_low_available_disk_size(handle, hosts: list, threshold: float = 5) -> Tuple:
    """Checks the available root disk size and compares it with the threshold.

    :type handle: SSH Client object
    :param handle: The SSH client.
    :type hosts: list
    :param hosts: List of hosts to connect to.
    :type threshold: float
    :param threshold: The disk size threshold in GB.
    :rtype: Status, list of dicts of hosts with available disk size less than the threshold
    """
    # Determine the block device mounted at '/' on every host.
    determine_disk_command = "lsblk -o NAME,MOUNTPOINT | grep ' /$' | awk '{print $1}' | tr -d '└─-'"
    disks = ssh_execute_remote_command(handle, hosts, determine_disk_command)
    # Refuse mixed fleets: one df command is built for all hosts below.
    unique_disks = set(disks.values())
    if len(unique_disks) > 1:
        disk_details = ', '.join([f"{host}: {disk}" for host, disk in disks.items()])
        raise ValueError(f"The provided hosts have different disk names. Details: {disk_details}. Please execute them one by one.")
    disk = unique_disks.pop()
    # Query the common root device on every host.
    command = f"df -h /dev/{disk.strip()} | tail -1"
    print(f"Executing command: {command}")
    outputs = ssh_execute_remote_command(handle, hosts, command)
    result = []
    for host, host_output in outputs.items():
        try:
            # df -h row: Filesystem Size Used Avail Use% Mounted-on;
            # 'Avail' is the 4th column (index 3).
            parts = host_output.split()
            if len(parts) > 4:
                available = parts[3]
                # NOTE(review): assumes the Avail value is G-suffixed (e.g. "12G");
                # an M- or T-suffixed value would be misread as GB — confirm.
                available_size = float(available[:-1])
                if available_size < threshold:
                    result.append({host: available_size})
            else:
                print(f'For host {host}, the output is not in expected format.')
        except Exception as e:
            raise e
    if result:
        return (False, result)
    return (True, None)
================================================
FILE: SSH/legos/ssh_get_ec2_instances_with_low_memory_size/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SSH/legos/ssh_get_ec2_instances_with_low_memory_size/__init__.py
================================================
================================================
FILE: SSH/legos/ssh_get_ec2_instances_with_low_memory_size/ssh_get_ec2_instances_with_low_memory_size.json
================================================
{
"action_title": "Get AWS EC2 with low free memory size",
"action_description": "This action uses SSH to identify AWS EC2 instances with low available memory.",
"action_type": "LEGO_TYPE_SSH",
"action_entry_function": "ssh_get_ec2_instances_with_low_memory_size",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE", "CATEGORY_TYPE_AWS", "CATEGORY_TYPE_AWS_EC2", "CATEGORY_TYPE_SSH"],
"action_next_hop": [""],
"action_next_hop_parameter_mapping": {}
}
================================================
FILE: SSH/legos/ssh_get_ec2_instances_with_low_memory_size/ssh_get_ec2_instances_with_low_memory_size.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List, Optional, Tuple
from pydantic import BaseModel, Field
from unskript.legos.ssh.ssh_execute_remote_command.ssh_execute_remote_command import ssh_execute_remote_command
class InputSchema(BaseModel):
    """Inputs for the low-free-memory check."""
    # Hosts whose available memory is inspected.
    hosts: list = Field(
        ...,
        description='List of hosts to connect to. For eg. ["host1", "host2"].',
        title='Hosts',
    )
    # Flag hosts whose available memory falls below this many MB.
    threshold: Optional[float] = Field(
        default= 400,
        description='Optional memory size threshold in MB. Default- 400 MB',
        title='Threshold(in MB)',
    )
def ssh_get_ec2_instances_with_low_memory_size_printer(output):
    """Pretty-print the (status, data) result tuple, if any."""
    if output is not None:
        pprint.pprint(output)
def ssh_get_ec2_instances_with_low_memory_size(handle, hosts: list, threshold: float = 400) -> Tuple:
    """Get EC2 instances with free memory size less than a given threshold.

    :type handle: SSH Client object
    :param handle: The SSH client.
    :type hosts: list
    :param hosts: List of hosts to connect to.
    :type threshold: float
    :param threshold: Optional memory size threshold in MB.
    :rtype: Status, list of dicts of hosts with available memory less than the threshold along with the size in MB
    """
    # 'free -m' row 2, column 7 is the "available" memory in MB.
    command = "free -m| awk 'NR==2{printf \"%.2f\", $7}'"
    output = ssh_execute_remote_command(handle, hosts, command)
    result = []
    for host, host_output in output.items():
        try:
            available_memory = float(host_output)
            # Compare the available memory size with the threshold.
            # Append one dict per offending host; appending a shared
            # accumulating dict here would duplicate entries in the result.
            if available_memory < threshold:
                result.append({host: available_memory})
        except Exception as e:
            raise e
    if len(result) != 0:
        return (False, result)
    return (True, None)
================================================
FILE: SSH/legos/ssh_get_handle/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SSH/legos/ssh_get_handle/__init__.py
================================================
================================================
FILE: SSH/legos/ssh_get_handle/ssh_get_handle.json
================================================
{
"action_title": "Get SSH handle",
"action_description": "Get SSH handle",
"action_type": "LEGO_TYPE_SSH",
"action_entry_function": "ssh_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false,
"action_verbs": [
"get"
],
"action_nouns": [
"ssh",
"handle"
]
}
================================================
FILE: SSH/legos/ssh_get_handle/ssh_get_handle.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    """ssh_get_handle takes no input parameters."""
    pass
def ssh_get_handle(handle):
    """Return the SSH handle unchanged.

    :rtype: SSH handle.
    """
    # The credential subsystem builds the handle; this action simply exposes it.
    return handle
================================================
FILE: SSH/legos/ssh_get_hosts_with_low_disk_latency/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SSH/legos/ssh_get_hosts_with_low_disk_latency/__init__.py
================================================
================================================
FILE: SSH/legos/ssh_get_hosts_with_low_disk_latency/ssh_get_hosts_with_low_disk_latency.json
================================================
{
"action_title": "Get hosts with low disk latency ",
"action_description": "This action checks the disk latency on the provided hosts by running a disk write command and measuring the time taken. If the time taken exceeds a given threshold, the host is flagged as having potential latency issues.",
"action_type": "LEGO_TYPE_SSH",
"action_entry_function": "ssh_get_hosts_with_low_disk_latency",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: SSH/legos/ssh_get_hosts_with_low_disk_latency/ssh_get_hosts_with_low_disk_latency.py
================================================
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
from typing import Tuple, Optional
from pydantic import BaseModel, Field
from unskript.legos.ssh.ssh_execute_remote_command.ssh_execute_remote_command import ssh_execute_remote_command
class InputSchema(BaseModel):
    """Inputs for the disk-latency check."""
    # Hosts on which the timed dd write runs.
    hosts: list = Field(
        ...,
        description='List of hosts to connect to. For eg. ["host1", "host2"].',
        title='List of Hosts',
    )
    # Hosts whose write takes longer than this many seconds are flagged.
    threshold: Optional[float] = Field(
        10,
        description='Time threshold in seconds to flag a host for potential latency issues.',
        title='Threshold (in seconds)',
    )
def ssh_get_hosts_with_low_disk_latency_printer(output):
    """Print a human-readable summary of the latency-check result tuple."""
    if not output:
        print("No issues found.")
        return
    status, problematic_hosts = output
    if status:
        print("No latency issues found on any hosts.")
    else:
        print("Hosts with potential disk latency issues:", ', '.join(problematic_hosts))
def ssh_get_hosts_with_low_disk_latency(handle, hosts: list, threshold: float = 10) -> Tuple:
    """
    ssh_get_hosts_with_low_disk_latency checks the disk latency on the provided hosts
    by running a disk write command and measuring the time taken. If the time taken
    exceeds a given threshold, the host is flagged as having potential latency issues.

    :type handle: SSH Client object
    :param handle: The SSH client.
    :type hosts: list
    :param hosts: List of hosts to connect to.
    :type threshold: float
    :param threshold: Time threshold in seconds to flag a host for potential
        latency issues. Default is 10, matching the InputSchema default.
    :return: Status and the hosts with potential latency issues if any.
    """
    print("Starting the disk latency check...")
    # Write 10240 x 8KB blocks with O_DIRECT so the page cache does not mask
    # device latency; 'time -p' emits a parseable "real <seconds>" line.
    latency_command = "/usr/bin/time -p dd if=/dev/zero of=~/test.png bs=8192 count=10240 oflag=direct 2>&1"
    outputs = ssh_execute_remote_command(handle, hosts, latency_command)
    # Cleanup: Remove the created test file
    print("Cleaning up resources...")
    cleanup_command = "rm ~/test.png"
    ssh_execute_remote_command(handle, hosts, cleanup_command)
    hosts_with_issues = []
    for host, output in outputs.items():
        if not output.strip():
            print(f"Command execution failed or returned empty output on host {host}.")
            continue
        # for/else: 'else' runs only when no "real" line was found.
        for line in output.splitlines():
            if line.startswith("real"):
                time_line = line
                break
        else:
            print(f"Couldn't find 'real' time in output for host {host}.")
            continue
        # Parse the wall-clock seconds and compare against the threshold.
        try:
            total_seconds = float(time_line.split()[1])
            if total_seconds > threshold:
                hosts_with_issues.append(host)
        except Exception as e:
            raise e
    if hosts_with_issues:
        return (False, hosts_with_issues)
    return (True, None)
================================================
FILE: SSH/legos/ssh_restart_service_using_sysctl/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SSH/legos/ssh_restart_service_using_sysctl/__init__.py
================================================
================================================
FILE: SSH/legos/ssh_restart_service_using_sysctl/ssh_restart_service_using_sysctl.json
================================================
{
"action_title": "SSH Restart Service Using sysctl",
"action_description": "SSH Restart Service Using sysctl",
"action_type": "LEGO_TYPE_SSH",
"action_entry_function": "ssh_restart_service_using_sysctl",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_verbs": [
"restart"
],
"action_nouns": [
"sysctl"
],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SSH"]
}
================================================
FILE: SSH/legos/ssh_restart_service_using_sysctl/ssh_restart_service_using_sysctl.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List, Optional, Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the systemctl service-restart action."""
    # Hosts on which the service is restarted.
    hosts: List[str] = Field(
        title='Hosts',
        description='List of hosts to connect to. For eg. ["host1", "host2"].'
    )
    # Optional override of the credential-supplied proxy host.
    proxy_host: Optional[str] = Field(
        title='Proxy host',
        description='Override the proxy host provided in the credentials. \
It still uses the proxy_user and port from the credentials.'
    )
    # systemd unit name passed to 'systemctl restart'.
    service_name: str = Field(
        title='Service Name',
        description='Service name to restart.'
    )
    # Elevate the restart with sudo when True.
    sudo: Optional[bool] = Field(
        default=False,
        title='Restart with sudo',
        description='Restart service with sudo.'
    )
def ssh_restart_service_using_sysctl_printer(output):
    """Pretty-print the per-host restart output, if any."""
    if output is not None:
        print("\n")
        pprint.pprint(output)
def ssh_restart_service_using_sysctl(
        sshClient,
        hosts: List[str],
        service_name: str,
        sudo: bool = False,
        proxy_host: str = None
) -> Dict:
    """ssh_restart_service_using_sysctl restart Service Using sysctl

    :type hosts: List[str]
    :param hosts: List of hosts to connect to. For eg. ["host1", "host2"].

    :type service_name: str
    :param service_name: Service name to restart.

    :type sudo: bool
    :param sudo: Restart service with sudo.

    :type proxy_host: str
    :param proxy_host: Optional proxy host to use.

    :rtype: Dict mapping hostname to the restart command's stdout lines
    """
    client = sshClient(hosts, proxy_host)
    restart_results = client.run_command(command=f"systemctl restart {service_name}", sudo=sudo)
    client.join()
    # Collect each host's stdout lines keyed by hostname.
    return {host_output.host: list(host_output.stdout) for host_output in restart_results}
================================================
FILE: SSH/legos/ssh_scp/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SSH/legos/ssh_scp/__init__.py
================================================
================================================
FILE: SSH/legos/ssh_scp/ssh_scp.json
================================================
{
"action_title": "SCP: Remote file transfer over SSH",
"action_description": "Copy files from or to remote host. Files are copied over SCP. ",
"action_type": "LEGO_TYPE_SSH",
"action_entry_function": "ssh_scp",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_supports_iteration": true,
"action_verbs": [
"copy",
"transfer",
"scp"
],
"action_nouns": [
"ssh",
"file"
],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SSH"]
}
================================================
FILE: SSH/legos/ssh_scp/ssh_scp.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the SCP file-transfer action."""
    # Single remote host involved in the transfer.
    host: str = Field(
        title='Host',
        description='Hosts to connect to. For eg. "10.10.10.10"'
    )
    # Optional override of the credential-supplied proxy host.
    proxy_host: Optional[str] = Field(
        title='Proxy host',
        description='Override the proxy host provided in the credentials. \
It still uses the proxy_user and port from the credentials.'
    )
    # Path on the remote server.
    remote_file: str = Field(
        title='Remote File',
        description='Filename on the remote server. Eg /home/ec2-user/my_remote_file'
    )
    # Path on the unSkript proxy.
    local_file: str = Field(
        title="Local File",
        description='Filename on the unSkript proxy. Eg /tmp/my_local_file'
    )
    # True = receive from the remote server (default); False = send to it.
    direction: bool = Field(
        default=True,
        title="Receive",
        description="Direction of the copy operation. Default is receive-from-remote-server"
    )
def ssh_scp_printer(output):
    """Pretty-print the SCP result, if any (the action itself returns None)."""
    if output is not None:
        print("\n")
        pprint.pprint(output)
def ssh_scp(
        sshClient,
        host: str,
        remote_file: str,
        local_file: str,
        proxy_host: str = None,
        direction: bool = True):
    """ssh_scp Copy files from or to remote host.

    :type host: str
    :param host: Host to connect to. Eg 10.10.10.10.

    :type remote_file: str
    :param remote_file: Filename on the remote server. Eg /home/ec2-user/my_remote_file

    :type local_file: str
    :param local_file: Filename on the unSkript proxy. Eg /tmp/my_local_file

    :type proxy_host: str
    :param proxy_host: Proxy Host to connect host via. Eg 10.10.10.10.

    :type direction: bool
    :param direction: Direction of the copy operation. Default is receive-from-remote-server

    :rtype: None
    """
    # The client API takes the local path first, remote path second, and a
    # direction flag selecting receive (True) versus send (False).
    scp_client = sshClient([host], proxy_host)
    scp_client.copy_file(local_file, remote_file, direction)
    scp_client.join()
================================================
FILE: SalesForce/README.md
================================================
# SalesForce Actions
* [Assign Salesforce Case](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/SalesForce/legos/salesforce_assign_case/README.md): Assign a Salesforce case
* [Change Salesforce Case Status](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/SalesForce/legos/salesforce_case_change_status/README.md): Change Salesforce Case Status
* [Create Salesforce Case](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/SalesForce/legos/salesforce_create_case/README.md): Create a Salesforce case
* [Delete Salesforce Case](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/SalesForce/legos/salesforce_delete_case/README.md): Delete a Salesforce case
* [Get Salesforce Case Info](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/SalesForce/legos/salesforce_get_case/README.md): Get a Salesforce case info
* [Get Salesforce Case Status](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/SalesForce/legos/salesforce_get_case_status/README.md): Get a Salesforce case status
* [Get Salesforce handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/SalesForce/legos/salesforce_get_handle/README.md): Get Salesforce handle
* [Search Salesforce Case](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/SalesForce/legos/salesforce_search_case/README.md): Search a Salesforce case
* [Update Salesforce Case](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/SalesForce/legos/salesforce_update_case/README.md): Update a Salesforce case
================================================
FILE: SalesForce/__init__.py
================================================
================================================
FILE: SalesForce/legos/__init__.py
================================================
================================================
FILE: SalesForce/legos/salesforce_assign_case/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SalesForce/legos/salesforce_case_change_status/__init__.py
================================================
================================================
FILE: SalesForce/legos/salesforce_case_change_status/salesforce_case_change_status.json
================================================
{
"action_title": "Change Salesforce Case Status",
"action_description": "Change Salesforce Case Status",
"action_type": "LEGO_TYPE_SALESFORCE",
"action_entry_function": "salesforce_case_change_status",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": false,
"action_verbs": [
"change"
],
"action_nouns": [
"salesforce",
"case",
"status"
],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SALESFORCE"]
}
================================================
FILE: SalesForce/legos/salesforce_case_change_status/salesforce_case_change_status.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from unskript.enums.salesforce_enums import Status
# Module-level pretty-printer shared by this lego (indent=4 for readability).
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Inputs for the Salesforce case status-change action."""
    # Case number used to look up the record Id via SOQL.
    case_number: str = Field(
        title='Case Number',
        description='The Case number to get the details about the case')
    # New status applied to the case.
    status: Status = Field(
        title='Status',
        description='The status of the case. Default is "New"')
def salesforce_case_change_status_printer(output):
    """Print the status-change result message, if any."""
    if output is not None:
        print(output)
def salesforce_case_change_status(handle, case_number: str, status: Status) -> str:
"""salesforce_case_change_status change status for given case
:type case_number: str
:param case_number: The Case number to get the details about the case
:type status: Status
:param status: Salesforce Case Status. Possible values: New|Working|Escalated
:rtype: str
"""
record_id = handle.query(f"SELECT Id FROM Case WHERE CaseNumber = '{case_number}'")
if not record_id['records']:
return "Invalid Case Number"
status = status.value if status else None
record_id = record_id['records'][0]['Id']
data = {
"Status": status
}
resp = handle.Case.update(record_id, data)
if resp == 204:
return f"Status change successfully for case {case_number} "
return "Error Occurred"
================================================
FILE: SalesForce/legos/salesforce_create_case/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SalesForce/legos/salesforce_create_case/__init__.py
================================================
================================================
FILE: SalesForce/legos/salesforce_create_case/salesforce_create_case.json
================================================
{
"action_title": "Create Salesforce Case",
"action_description": "Create a Salesforce case",
"action_type": "LEGO_TYPE_SALESFORCE",
"action_entry_function": "salesforce_create_case",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": false,
"action_verbs": [
"create"
],
"action_nouns": [
"salesforce",
"case"
],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SALESFORCE"]
}
================================================
FILE: SalesForce/legos/salesforce_create_case/salesforce_create_case.py
================================================
import pprint
import json
from typing import Dict, Optional
from pydantic import BaseModel, Field
from tabulate import tabulate
from unskript.enums.salesforce_enums import Status, CaseOrigin, CaseType, Priority, CaseReason, \
PotentialLiability, SLAViolation
pp = pprint.PrettyPrinter(indent=4)
class AdditionalInformation(BaseModel):
    """Optional extra attributes for a case; the entry function maps these to
    custom fields on the Salesforce Case object (Product__c,
    EngineeringReqNumber__c, PotentialLiability__c, SLAViolation__c)."""
    product: Optional[str] = Field(
        title='Product',
        description='Product associated with case')
    engineering_req_number: Optional[str] = Field(
        title='Engineering Req Number',
        description='Engineering Req Number')
    potential_liability: Optional[PotentialLiability] = Field(
        title='Potential Liability',
        description='Potential Liability')
    sla_violation: Optional[SLAViolation] = Field(
        title='SLA Violation',
        description='SLA Violation')
class WebInformation(BaseModel):
    """Details for a web-originated case; the entry function maps these to the
    Case SuppliedEmail/SuppliedName/SuppliedCompany/SuppliedPhone fields."""
    web_email: Optional[str] = Field(
        title='Web Email',
        description='Web Email')
    web_company: Optional[str] = Field(
        title='Web Company',
        description='Web Company')
    web_name: Optional[str] = Field(
        title='Web Name',
        description='Web Name')
    web_phone: Optional[str] = Field(
        title='Web Phone',
        description='Web Phone')
class InputSchema(BaseModel):
    """Inputs for salesforce_create_case."""
    status: Status = Field(
        title='Status',
        description='The status of the case. Default is "New"')
    priority: Optional[Priority] = Field(
        title='Priority',
        description='The priority of the case')
    case_origin: CaseOrigin = Field(
        title='Case Origin',
        description='The origin of the case')
    contact_name: Optional[str] = Field(
        title='Contact Name',
        description='The name of the contact')
    account_name: Optional[str] = Field(
        title='Account Name',
        description='The name of the Account')
    type: Optional[CaseType] = Field(
        title='Type',
        description='The type of the case')
    case_reason: Optional[CaseReason] = Field(
        title='Case Reason ',
        description='The Reason for the case')
    subject: Optional[str] = Field(
        title='Subject',
        description='Title of the case')
    description: Optional[str] = Field(
        title='Description',
        description='A short description about the case')
    internal_comments: Optional[str] = Field(
        title='Internal Comments',
        description='Comments about the case')
    # NOTE(review): Field(...) marks this Optional field as *required* in
    # pydantic — presumably it should default to None like web_information;
    # confirm against the platform's form rendering before changing.
    additional_information: Optional[AdditionalInformation] = Field(...)
    web_information: Optional[WebInformation] = Field(None, alias='Web Information')
def salesforce_create_case_printer(output):
    """Print the created case as indented JSON, then summarize its
    CaseNumber in a grid-formatted table. No-op when output is None."""
    if output is None:
        return
    print("\n")
    print(json.dumps(output, indent=4))
    print("\n")
    table = tabulate([[output.get("CaseNumber")]], headers=['CaseNumber'], tablefmt="grid")
    print(table)
def salesforce_create_case(handle,
                           status: Status,
                           case_origin: CaseOrigin,
                           priority: Priority = Priority.LOW,
                           contact_name: str = "",
                           account_name: str = "",
                           type: CaseType = CaseType.ELECTRONIC,
                           case_reason: CaseReason = CaseReason.OTHER,
                           subject: str = "",
                           description: str = "",
                           internal_comments: str = "",
                           additional_information: dict = None,
                           web_information: dict = None,
                           ) -> Dict:
    """salesforce_create_case creates a Salesforce case.

    :type status: Status
    :param status: The status of the case. Default is "New"

    :type case_origin: CaseOrigin
    :param case_origin: The origin of the case.

    :type priority: Priority
    :param priority: The priority of the case.

    :type contact_name: str
    :param contact_name: The name of the contact.

    :type account_name: str
    :param account_name: The name of the Account.

    :type type: CaseType
    :param type: The type of the case.

    :type case_reason: CaseReason
    :param case_reason: The Reason for the case.

    :type subject: str
    :param subject: Title of the case.

    :type description: str
    :param description: A short description about the case.

    :type internal_comments: str
    :param internal_comments: Comments about the case.

    :type additional_information: dict
    :param additional_information: Custom-field values: product,
        engineering_req_number, potential_liability, sla_violation.

    :type web_information: dict
    :param web_information: Web-originated details: web_email, web_name,
        web_company, web_phone.

    :rtype: Dict - The newly created Case record on success, otherwise the
        errors reported by the create call (or an error dict for a bad
        contact/account name).
    """
    contact_id = ""
    account_id = ""
    # Enum members carry the Salesforce picklist value in .value.
    status = status.value if status else None
    case_origin = case_origin.value if case_origin else None
    type = type.value if type else None
    priority = priority.value if priority else None
    case_reason = case_reason.value if case_reason else None
    # Resolve contact/account names to record Ids, failing fast when unknown.
    if contact_name != "":
        contact_id = handle.query(f"SELECT Id FROM Contact WHERE Name = '{contact_name}'")
        if contact_id['records'] == []:
            return {"Error": "Invalid Contact name"}
        contact_id = contact_id['records'][0]['Id']
    if account_name != "":
        account_id = handle.query(f"SELECT Id FROM Account WHERE Name = '{account_name}'")
        if account_id['records'] == []:
            return {"Error": "Invalid Account name"}
        account_id = account_id['records'][0]['Id']
    data = {}
    data['Status'] = status
    data['Priority'] = priority
    data['Origin'] = case_origin
    data['ContactId'] = contact_id
    data['AccountId'] = account_id
    data['Type'] = type
    data['Reason'] = case_reason
    # Only send the web-originated fields that were actually supplied
    # (consistent with salesforce_update_case) instead of posting explicit
    # None values for missing keys.
    if web_information:
        if web_information.get("web_email"):
            data['SuppliedEmail'] = web_information.get("web_email")
        if web_information.get("web_name"):
            data['SuppliedName'] = web_information.get("web_name")
        if web_information.get("web_company"):
            data['SuppliedCompany'] = web_information.get("web_company")
        if web_information.get("web_phone"):
            data['SuppliedPhone'] = web_information.get("web_phone")
    if additional_information:
        if additional_information.get("product"):
            data["Product__c"] = additional_information.get("product")
        if additional_information.get("engineering_req_number"):
            data["EngineeringReqNumber__c"] = additional_information.get("engineering_req_number")
        if additional_information.get("potential_liability"):
            data["PotentialLiability__c"] = additional_information.get("potential_liability")
        if additional_information.get("sla_violation"):
            data["SLAViolation__c"] = additional_information.get("sla_violation")
    data['Subject'] = subject
    data['Description'] = description
    data['Comments'] = internal_comments
    # simple-salesforce create returns {'id': ..., 'success': ..., 'errors': [...]}.
    case = handle.Case.create(data)
    if case.get("success"):
        return handle.Case.get(case.get("id"))
    return case.get("errors")
================================================
FILE: SalesForce/legos/salesforce_delete_case/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SalesForce/legos/salesforce_delete_case/__init__.py
================================================
================================================
FILE: SalesForce/legos/salesforce_delete_case/salesforce_delete_case.json
================================================
{
"action_title": "Delete Salesforce Case",
"action_description": "Delete a Salesforce case",
"action_type": "LEGO_TYPE_SALESFORCE",
"action_entry_function": "salesforce_delete_case",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": false,
"action_verbs": [
"delete"
],
"action_nouns": [
"salesforce",
"case"
],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SALESFORCE"]
}
================================================
FILE: SalesForce/legos/salesforce_delete_case/salesforce_delete_case.py
================================================
import pprint
from pydantic import BaseModel, Field
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Input for salesforce_delete_case."""
    case_number: str = Field(
        title='Case Number',
        description='The Case number of the case to delete')
def salesforce_delete_case_printer(output):
    """Pretty-print the deletion result; nothing to do when output is None."""
    if output is not None:
        pprint.pprint(output)
def salesforce_delete_case(handle, case_number: str) -> str:
    """salesforce_delete_case deletes a particular case.

    :type case_number: str
    :param case_number: The Case number of the case to delete

    :rtype: str - Message describing the outcome of the deletion.
    """
    result = handle.query(f"SELECT Id FROM Case WHERE CaseNumber = '{case_number}'")
    matches = result['records']
    if not matches:
        return "Invalid Case Number"
    # simple-salesforce returns HTTP 204 (No Content) on a successful delete.
    if handle.Case.delete(matches[0]['Id']) == 204:
        return f"Case {case_number} deleted successfully"
    return "Error Occurred"
================================================
FILE: SalesForce/legos/salesforce_get_case/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SalesForce/legos/salesforce_get_case/__init__.py
================================================
================================================
FILE: SalesForce/legos/salesforce_get_case/salesforce_get_case.json
================================================
{
"action_title": "Get Salesforce Case Info",
"action_description": "Get a Salesforce case info",
"action_type": "LEGO_TYPE_SALESFORCE",
"action_entry_function": "salesforce_get_case",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": false,
"action_verbs": [
"get"
],
"action_nouns": [
"salesforce",
"case",
"info"
],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SALESFORCE"]
}
================================================
FILE: SalesForce/legos/salesforce_get_case/salesforce_get_case.py
================================================
import json
import pprint
from typing import Dict
from pydantic import BaseModel, Field
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Input for salesforce_get_case."""
    case_number: str = Field(
        title='Case Number',
        description='The Case number to get the details about the case')
def salesforce_get_case_printer(output):
    """Render the case record as indented JSON; skip when output is None."""
    if output is not None:
        print("\n")
        print(json.dumps(output, indent=4))
def salesforce_get_case(handle, case_number: str) -> Dict:
    """salesforce_get_case gets the details about a particular case.

    :type case_number: str
    :param case_number: The Case number to get the details about the case

    :rtype: Dict - The full Case record, or an error dict for an unknown
        case number.
    """
    result = handle.query(f"SELECT Id FROM Case WHERE CaseNumber = '{case_number}'")
    matches = result['records']
    if not matches:
        return {"Error": "Invalid Case Number"}
    return handle.Case.get(matches[0]['Id'])
================================================
FILE: SalesForce/legos/salesforce_get_case_status/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SalesForce/legos/salesforce_get_case_status/__init__.py
================================================
================================================
FILE: SalesForce/legos/salesforce_get_case_status/salesforce_get_case_status.json
================================================
{
"action_title": "Get Salesforce Case Status",
"action_description": "Get a Salesforce case status",
"action_type": "LEGO_TYPE_SALESFORCE",
"action_entry_function": "salesforce_get_case_status",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_iteration": false,
"action_verbs": [
"get"
],
"action_nouns": [
"salesforce",
"case",
"status"
],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SALESFORCE"]
}
================================================
FILE: SalesForce/legos/salesforce_get_case_status/salesforce_get_case_status.py
================================================
import pprint
from pydantic import BaseModel, Field
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Input for salesforce_get_case_status."""
    case_number: str = Field(
        title='Case Number',
        description='The Case number to get the details about the case')
def salesforce_get_case_status_printer(output):
    """Print the case status on its own line; skip when output is None."""
    if output is not None:
        print("\n")
        print(output)
def salesforce_get_case_status(handle, case_number: str) -> str:
    """salesforce_get_case_status gets the status of a particular case.

    :type case_number: str
    :param case_number: The Case number to get the details about the case

    :rtype: str - The case's Status field, or an error message for an
        unknown case number.
    """
    result = handle.query(f"SELECT Id FROM Case WHERE CaseNumber = '{case_number}'")
    matches = result['records']
    if not matches:
        return "Invalid Case Number"
    return handle.Case.get(matches[0]['Id']).get("Status")
================================================
FILE: SalesForce/legos/salesforce_get_handle/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SalesForce/legos/salesforce_get_handle/__init__.py
================================================
================================================
FILE: SalesForce/legos/salesforce_get_handle/salesforce_get_handle.json
================================================
{
"action_title": "Get Salesforce handle",
"action_description": "Get Salesforce handle",
"action_type": "LEGO_TYPE_SALESFORCE",
"action_entry_function": "salesforce_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_supports_iteration": false,
"action_verbs": [
"get"
],
"action_nouns": [
"salesforce",
"handle"
],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SALESFORCE"]
}
================================================
FILE: SalesForce/legos/salesforce_get_handle/salesforce_get_handle.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    """This Action takes no inputs."""
    pass
def salesforce_get_handle(handle) -> None:
    """
    salesforce_get_handle returns the Salesforce handle.
    :rtype: Salesforce handle.
    """
    # NOTE(review): the handle is returned even though the annotation says
    # None; the Action metadata declares ACTION_OUTPUT_TYPE_NONE, so the
    # annotation is kept as-is — confirm before tightening it.
    return handle
================================================
FILE: SalesForce/legos/salesforce_search_case/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SalesForce/legos/salesforce_search_case/__init__.py
================================================
================================================
FILE: SalesForce/legos/salesforce_search_case/salesforce_search_case.json
================================================
{
"action_title": "Search Salesforce Case",
"action_description": "Search a Salesforce case",
"action_type": "LEGO_TYPE_SALESFORCE",
"action_entry_function": "salesforce_search_case",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": false,
"action_verbs": [
"search"
],
"action_nouns": [
"salesforce",
"case"
],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SALESFORCE"]
}
================================================
FILE: SalesForce/legos/salesforce_search_case/salesforce_search_case.py
================================================
import json
import pprint
from typing import List
from pydantic import BaseModel, Field
from tabulate import tabulate
pp = pprint.PrettyPrinter(indent=4)
class InputSchema(BaseModel):
    """Input for salesforce_search_case."""
    # The field name must match the entry function's "search" parameter so
    # the platform passes the value through (it was previously "case_number",
    # copy-pasted from salesforce_get_case).
    search: str = Field(
        title='Search',
        description='Search based on Status/Priority/Subject/CaseNumber/Reason')
def salesforce_search_case_printer(output):
    """Print every matching case as JSON, then summarize their CaseNumbers
    in a grid-formatted table. No-op when output is None."""
    if output is None:
        return
    print("\n")
    rows = []
    for case in output:
        print(json.dumps(case, indent=4))
        rows.append([case.get("CaseNumber")])
    print("\n")
    print(tabulate(rows, headers=['CaseNumber'], tablefmt="grid"))
def salesforce_search_case(handle, search: str) -> List:
    """salesforce_search_case finds cases whose Status, Priority, Subject,
    Reason or CaseNumber matches the given term.

    :type search: str
    :param search: Search based on Status/Priority/Subject/CaseNumber/Reason

    :rtype: List - Full Case records for every match (empty list when none).
    """
    # Leading wildcard so the term matches as a suffix of each field value.
    pattern = "%" + search
    # Every fragment ends with an explicit space so the concatenated SOQL is
    # well-formed ("... Like '%x' Or Status ..."); the previous version glued
    # the first two fragments together ("...'%x'Or Status...") and left a
    # stray line continuation after the last fragment.
    query = (
        f"SELECT Id FROM Case WHERE Priority Like '{pattern}' "
        f"Or Status Like '{pattern}' "
        f"Or Subject Like '{pattern}' "
        f"Or Reason Like '{pattern}' "
        f"Or CaseNumber Like '{pattern}'"
    )
    records = handle.query(query)['records']
    if records:
        cases = []
        for record in records:
            cases.append(handle.Case.get(record['Id']))
        return cases
    return records
================================================
FILE: SalesForce/legos/salesforce_update_case/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: SalesForce/legos/salesforce_update_case/__init__.py
================================================
================================================
FILE: SalesForce/legos/salesforce_update_case/salesforce_update_case.json
================================================
{
"action_title": "Update Salesforce Case",
"action_description": "Update a Salesforce case",
"action_type": "LEGO_TYPE_SALESFORCE",
"action_entry_function": "salesforce_update_case",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": false,
"action_verbs": [
"update"
],
"action_nouns": [
"salesforce",
"case"
],
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SALESFORCE"]
}
================================================
FILE: SalesForce/legos/salesforce_update_case/salesforce_update_case.py
================================================
import pprint
import json
from typing import Dict, Optional
from pydantic import BaseModel, Field
from tabulate import tabulate
from unskript.enums.salesforce_enums import Status, CaseOrigin, CaseType, Priority, CaseReason, \
PotentialLiability, SLAViolation
pp = pprint.PrettyPrinter(indent=4)
class AdditionalInformation(BaseModel):
    """Optional extra attributes for a case; the entry function maps these to
    custom fields on the Salesforce Case object (Product__c,
    EngineeringReqNumber__c, PotentialLiability__c, SLAViolation__c)."""
    product: Optional[str] = Field(
        title='Product',
        description='Product associated with case')
    engineering_req_number: Optional[str] = Field(
        title='Engineering Req Number',
        description='Engineering Req Number')
    potential_liability: Optional[PotentialLiability] = Field(
        title='Potential Liability',
        description='Potential Liability')
    sla_violation: Optional[SLAViolation] = Field(
        title='SLA Violation',
        description='SLA Violation')
class WebInformation(BaseModel):
    """Details for a web-originated case; the entry function maps these to the
    Case SuppliedEmail/SuppliedName/SuppliedCompany/SuppliedPhone fields."""
    web_email: Optional[str] = Field(
        title='Web Email',
        description='Web Email')
    web_company: Optional[str] = Field(
        title='Web Company',
        description='Web Company')
    web_name: Optional[str] = Field(
        title='Web Name',
        description='Web Name')
    web_phone: Optional[str] = Field(
        title='Web Phone',
        description='Web Phone')
class InputSchema(BaseModel):
    """Inputs for salesforce_update_case."""
    case_number: str = Field(
        title='Case Number',
        description='The Case number to get the details about the case')
    status: Status = Field(
        title='Status',
        description='The status of the case. Default is "New"')
    priority: Optional[Priority] = Field(
        title='Priority',
        description='The priority of the case')
    case_origin: CaseOrigin = Field(
        title='Case Origin',
        description='The origin of the case')
    contact_name: Optional[str] = Field(
        title='Contact Name',
        description='The name of the contact')
    account_name: Optional[str] = Field(
        title='Account Name',
        description='The name of the Account')
    type: Optional[CaseType] = Field(
        title='Type',
        description='The type of the case')
    case_reason: Optional[CaseReason] = Field(
        title='Case Reason ',
        description='The Reason for the case')
    subject: Optional[str] = Field(
        title='Subject',
        description='Title of the case')
    description: Optional[str] = Field(
        title='Description',
        description='A short description about the case')
    internal_comments: Optional[str] = Field(
        title='Internal Comments',
        description='Comments about the case')
    # NOTE(review): Field(...) marks this Optional field as *required* in
    # pydantic — presumably it should default to None like web_information;
    # confirm against the platform's form rendering before changing.
    additional_information: Optional[AdditionalInformation] = Field(...)
    web_information: Optional[WebInformation] = Field(None, alias='Web Information')
def salesforce_update_case_printer(output):
    """Print the updated case as indented JSON, then summarize its
    CaseNumber in a grid-formatted table. No-op when output is None."""
    if output is None:
        return
    print("\n")
    print(json.dumps(output, indent=4))
    print("\n")
    table = tabulate([[output.get("CaseNumber")]], headers=['CaseNumber'], tablefmt="grid")
    print(table)
def salesforce_update_case(handle,
                           case_number: str,
                           status: Status,
                           case_origin: CaseOrigin,
                           priority: Priority = Priority.LOW,
                           contact_name: str = "",
                           account_name: str = "",
                           type: CaseType = CaseType.ELECTRONIC,
                           case_reason: CaseReason = CaseReason.OTHER,
                           subject: str = "",
                           description: str = "",
                           internal_comments: str = "",
                           additional_information: dict = None,
                           web_information: dict = None,
                           ) -> Dict:
    """salesforce_update_case update salesforce case

    :type status: Status
    :param status: The status of the case. Default is "New"

    :type case_number: str
    :param case_number: The Case number to get the details about the case

    :type case_origin: CaseOrigin
    :param case_origin: The origin of the case.

    :type priority: Priority
    :param priority: The priority of the case.

    :type contact_name: str
    :param contact_name: The name of the contact.

    :type account_name: str
    :param account_name: The name of the Account.

    :type type: CaseType
    :param type: The type of the case.

    :type case_reason: CaseReason
    :param case_reason: The Reason for the case.

    :type subject: str
    :param subject: Title of the case.

    :type description: str
    :param description: A short description about the case.

    :type internal_comments: str
    :param internal_comments: Comments about the case.

    :rtype: Dict - The updated Case record when the update succeeds (HTTP
        204), otherwise the raw response from the update call or an error
        dict for a bad case/contact/account name.
    """
    # Resolve the case number to a record Id; bail out early when unknown.
    records = handle.query(f"SELECT Id FROM Case WHERE CaseNumber = '{case_number}'")
    if not records['records']:
        return {"Error": "Invalid Case Number"}
    record_id = records['records'][0]['Id']
    # Fetch the current record so fields the caller left falsy keep their
    # existing values instead of being blanked.
    case = handle.Case.get(record_id)
    data = {}
    contact_id = ""
    account_id = ""
    # Enum members carry the Salesforce picklist value in .value; fall back
    # to the value already on the case when the argument is falsy.
    status = status.value if status else case.get("Status")
    case_origin = case_origin.value if case_origin else case.get("Origin")
    type = type.value if type else case.get("Type")
    priority = priority.value if priority else case.get("Priority")
    case_reason = case_reason.value if case_reason else case.get("Reason")
    # Resolve contact/account names to record Ids, failing fast when unknown.
    if contact_name != "":
        contact_id = handle.query(f"SELECT Id FROM Contact WHERE Name = '{contact_name}'")
        if contact_id['records'] == []:
            return {"Error": "Invalid Contact name"}
        contact_id = contact_id['records'][0]['Id']
    if account_name != "":
        account_id = handle.query(f"SELECT Id FROM Account WHERE Name = '{account_name}'")
        if account_id['records'] == []:
            return {"Error": "Invalid Account name"}
        else:
            account_id = account_id['records'][0]['Id']
    # NOTE(review): when contact_name/account_name are omitted these stay ""
    # and are sent as-is — presumably Salesforce treats "" as "leave/clear";
    # confirm this does not unintentionally detach an existing contact.
    data['Status'] = status
    data['Priority'] = priority
    data['Origin'] = case_origin
    data['ContactId'] = contact_id
    data['AccountId'] = account_id
    data['Type'] = type
    data['Reason'] = case_reason
    # Only send the web-originated fields that were actually supplied.
    if web_information:
        if web_information.get("web_email", None):
            data['SuppliedEmail'] = web_information.get("web_email", None)
        if web_information.get("web_name", None):
            data['SuppliedName'] = web_information.get("web_name", None)
        if web_information.get("web_company", None):
            data['SuppliedCompany'] = web_information.get("web_company", None)
        if web_information.get("web_phone", None):
            data['SuppliedPhone'] = web_information.get("web_phone", None)
    # Custom-field values are only included when present.
    if additional_information:
        if additional_information.get("product"):
            data["Product__c"] = additional_information.get("product")
        if additional_information.get("engineering_req_number"):
            data["EngineeringReqNumber__c"] = additional_information.get("engineering_req_number")
        if additional_information.get("potential_liability"):
            data["PotentialLiability__c"] = additional_information.get("potential_liability")
        if additional_information.get("sla_violation"):
            data["SLAViolation__c"] = additional_information.get("sla_violation")
    data['Subject'] = subject if subject else case.get("Subject")
    data['Description'] = description if description else case.get("Description")
    data['Comments'] = internal_comments if internal_comments else case.get("Comments")
    # simple-salesforce returns the HTTP status code; 204 (No Content) = success.
    resp = handle.Case.update(record_id, data)
    if resp == 204:
        return handle.Case.get(record_id)
    return resp
================================================
FILE: Slack/README.md
================================================
# Slack Actions
* [Create Slack Channel and Invite Users](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Slack/legos/slack_create_channel_invite_users/README.md): Create a Slack Channel with given name, and invite a list of userIds to the channel.
* [Get Slack SDK Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Slack/legos/slack_get_handle/README.md): Get Slack SDK Handle
* [Slack Lookup User by Email](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Slack/legos/slack_lookup_user_by_email/README.md): Given an email address, find the Slack user in the workspace.
You can then extract their profile picture, or retrieve their user ID (which you can use to send messages) from the output.
* [Post Slack Image](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Slack/legos/slack_post_image/README.md): Post Slack Image
* [Post Slack Message](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Slack/legos/slack_post_message/README.md): Post Slack Message
* [Slack Send DM](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Slack/legos/slack_send_DM/README.md): Given a list of Slack IDs, this Action will create a DM (one user) or group chat (multiple users), and send a message to the chat
================================================
FILE: Slack/__init__.py
================================================
================================================
FILE: Slack/legos/__init__.py
================================================
================================================
FILE: Slack/legos/slack_create_channel_invite_users/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Slack/legos/slack_create_channel_invite_users/__init__.py
================================================
================================================
FILE: Slack/legos/slack_create_channel_invite_users/slack_create_channel_invite_users.json
================================================
{
"action_title": "Create Slack Channel and Invite Users",
"action_description": "Create a Slack Channel with given name, and invite a list of userIds to the channel.",
"action_type": "LEGO_TYPE_SLACK",
"action_entry_function": "slack_create_channel_invite_users",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Slack/legos/slack_create_channel_invite_users/slack_create_channel_invite_users.py
================================================
from __future__ import annotations
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
from slack_sdk import WebClient
## note: Your Slack App will need the files:write scope.
# Your Bot will also need to be a member of the channel
class InputSchema(BaseModel):
    """Inputs for slack_create_channel_invite_users."""
    channel: str = Field(..., description='Name of slack channel.', title='Channel')
    user_list: List = Field(
        ...,
        description='List of users to invite to the new channel.',
        title='user_list',
        # list items are Slack user IDs, for example ['U046UH5F2HZ']
    )
pp = pprint.PrettyPrinter(indent=2)
def slack_create_channel_invite_users_printer(output):
    """Pretty-print the Action result; nothing to print when output is None."""
    if output is None:
        return
    pprint.pprint(output)
def slack_create_channel_invite_users(
        handle: WebClient,
        channel: str,
        user_list: list) -> str:
    """slack_create_channel_invite_users creates a public Slack channel and
    invites the given users to it.

    :type handle: WebClient
    :param handle: Slack SDK client.

    :type channel: str
    :param channel: Name of the Slack channel to create.

    :type user_list: list
    :param user_list: Slack user IDs to invite, e.g. ['U046UH5F2HZ'].

    :rtype: str - Success or failure message.
    """
    try:
        response = handle.conversations_create(
            name=channel,
            is_private=False
        )
        # Extract the ID of the created channel
        channel_id = response["channel"]["id"]
        for username in user_list:
            # Call the conversations.invite method to invite each user to the channel
            user_response = handle.conversations_invite(
                channel=channel_id,
                users=username
            )
            print(user_response)
            print(f"Invited user '{username}' to the channel.")
        return f"Successfully created Channel: #{channel}"
    except Exception as e:
        print("\n\n")
        # Previous messages wrongly said "sending message" — this Action
        # creates a channel and invites users, so report that instead.
        pp.pprint(
            f"Failed creating channel {channel} or inviting users, Error: {str(e)}")
        return f"Unable to create channel {channel}"
================================================
FILE: Slack/legos/slack_get_handle/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://unskript.com)
================================================
FILE: Slack/legos/slack_get_handle/__init__.py
================================================
================================================
FILE: Slack/legos/slack_get_handle/slack_get_handle.json
================================================
{
"action_title": "Get Slack SDK Handle",
"action_description": "Get Slack SDK Handle",
"action_type": "LEGO_TYPE_SLACK",
"action_entry_function": "slack_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_supports_iteration": false,
"action_verbs": [
"get"
],
"action_nouns": [
"slack",
"handle"
]
}
================================================
FILE: Slack/legos/slack_get_handle/slack_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    """This Action takes no inputs."""
    pass
def slack_get_handle(handle) -> None:
    """slack_get_handle returns the slack handle.
    :rtype: slack Handle.
    """
    # NOTE(review): the handle is returned even though the annotation says
    # None; the Action metadata declares ACTION_OUTPUT_TYPE_NONE, so the
    # annotation is kept as-is — confirm before tightening it.
    return handle
================================================
FILE: Slack/legos/slack_lookup_user_by_email/README.md
================================================
[
## Try it Out
You Try this Action in the unSkript [Free Trial](https://us.app.unskript.io/), or using the [open source Docker image](http://runbooks.sh).
================================================
FILE: Slack/legos/slack_lookup_user_by_email/__init__.py
================================================
================================================
FILE: Slack/legos/slack_lookup_user_by_email/slack_lookup_user_by_email.json
================================================
{
"action_title": "Slack Lookup User by Email",
"action_description": "Given an email address, find the slack user in the workspace.\n You can the extract their Profile picture, or retrieve their userid (which you can use to send messages) from the output.",
"action_type": "LEGO_TYPE_SLACK",
"action_entry_function": "slack_lookup_user_by_email",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Slack/legos/slack_lookup_user_by_email/slack_lookup_user_by_email.py
================================================
from __future__ import annotations
##
# Copyright (c) 2023 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
from slack_sdk import WebClient
## note: Your Slack App will need the users:read.email scope
# Otherwise you cannot access user's emails!
class InputSchema(BaseModel):
    """Input for slack_lookup_user_by_email."""
    email: str = Field(..., description='Email Address of user', title='email')
pp = pprint.PrettyPrinter(indent=2)
def slack_lookup_user_by_email_printer(output):
    """Pretty-print the looked-up user; nothing to print when output is None."""
    if output is None:
        return
    pprint.pprint(output)
def slack_lookup_user_by_email(
        handle: WebClient,
        email: str) -> Dict:
    """slack_lookup_user_by_email finds the Slack workspace user that owns
    the given email address. Requires the users:read.email OAuth scope.

    :type handle: WebClient
    :param handle: Slack SDK client.

    :type email: str
    :param email: Email address of the user to look up.

    :rtype: Dict - The Slack user object on success; on failure a message
        string (kept for backward compatibility with existing callers).
    """
    try:
        response = handle.users_lookupByEmail(email=email)
        return response['user']
    except Exception as e:
        print("\n\n")
        pp.pprint(
            f"Failed to find user, Error: {str(e)}")
        # Fixed wording: previously read "Unable to send find user ...".
        return f"Unable to find user with email {email}"
================================================
FILE: Slack/legos/slack_post_image/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://unskript.com)
================================================
FILE: Slack/legos/slack_post_image/__init__.py
================================================
================================================
FILE: Slack/legos/slack_post_image/slack_post_image.json
================================================
{ "action_title": "Post Slack Image",
"action_description": "Post Slack Image",
"action_type": "LEGO_TYPE_SLACK",
"action_entry_function": "slack_post_image",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SLACK"]
}
================================================
FILE: Slack/legos/slack_post_image/slack_post_image.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from slack_sdk import WebClient
from beartype import beartype
# Shared pretty-printer used for human-readable diagnostics below.
pp = pprint.PrettyPrinter(indent=2)

## note: Your Slack App will need the files:write scope.
# Your Bot will also need to be a member of the channel
class InputSchema(BaseModel):
    """Input parameters for the slack_post_image Action."""
    # Slack channel to post the image into.
    channel: str = Field(
        title='Channel',
        description='Name of slack channel.')
    # Text posted alongside the uploaded image.
    message: str = Field(
        title='message',
        description='Message for slack channel.')
    # Filesystem path of the image file to upload.
    image: str = Field(
        title='image',
        description='Path to image to be sent.')
@beartype
def slack_post_image_printer(output):
    """Pretty-print the result of slack_post_image; does nothing for None."""
    if output is None:
        return
    pprint.pprint(output)
@beartype
def slack_post_image(
        handle: WebClient,
        channel: str,
        message:str,
        image: str) -> str:
    """Upload an image file to a Slack channel with an accompanying message.

    :param handle: Authenticated slack_sdk WebClient (needs the files:write
        scope; the bot must be a member of the channel).
    :param channel: Name of slack channel.
    :param message: Text posted as the upload's initial comment.
    :param image: Path of the image file to upload.

    :rtype: str status message describing success or failure.
    """
    # NOTE(review): files_upload is Slack's legacy upload API; newer slack_sdk
    # releases recommend files_upload_v2 — confirm before migrating.
    try:
        handle.files_upload(
            channels=channel,
            initial_comment=message,
            file=image,
        )
    except Exception as e:
        print("\n\n")
        pp.pprint(
            f"Failed sending message to slack channel {channel}, Error: {str(e)}")
        return f"Unable to send message on {channel}"
    return f"Successfully Sent Message on Channel: #{channel}"
================================================
FILE: Slack/legos/slack_post_message/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://unskript.com)
================================================
FILE: Slack/legos/slack_post_message/__init__.py
================================================
================================================
FILE: Slack/legos/slack_post_message/slack_post_message.json
================================================
{ "action_title": "Post Slack Message",
"action_description": "Post Slack Message",
"action_type": "LEGO_TYPE_SLACK",
"action_entry_function": "slack_post_message",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SLACK"]
}
================================================
FILE: Slack/legos/slack_post_message/slack_post_message.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from beartype import beartype
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
# Shared pretty-printer used for human-readable diagnostics below.
pp = pprint.PrettyPrinter(indent=2)

class InputSchema(BaseModel):
    """Input parameters for the slack_post_message Action."""
    # Slack channel to post the message into.
    channel: str = Field(
        title='Channel',
        description='Name of slack channel.')
    # Message text to post.
    message: str = Field(
        title='Message',
        description='Message for slack channel.')
@beartype
def slack_post_message_printer(output):
    """Pretty-print the result of slack_post_message; does nothing for None."""
    if output is None:
        return
    pprint.pprint(output)
@beartype
def slack_post_message(
        handle: WebClient,
        channel: str,
        message: str) -> str:
    """Post a text message to a Slack channel.

    :param handle: Authenticated slack_sdk WebClient.
    :param channel: Name of slack channel.
    :param message: Message text to post.

    :rtype: str status message on success; an "Unable to send" string on
        non-Slack errors.
    :raises Exception: with a human-readable reason when the Slack API
        returns a known error code (raised from the SlackApiError).
    """
    # Map Slack API error codes to human-readable failure reasons.
    # (Replaces the original 20-branch if-chain; also fixes several typos
    # in the messages: Authtnecition/toekn/catostrophic/transietn.)
    slack_error_details = {
        'channel_not_found': 'Channel Not Found',
        'duplicate_channel_not_found': 'Channel associated with the message_id not valid',
        'not_in_channel': 'Cannot post message to channel user is not in',
        'is_archived': 'Channel has been archived',
        'msg_too_long': 'Message text is too long',
        'no_text': 'Message text was not provided',
        'restricted_action': 'Workspace preference prevents user from posting',
        'restricted_action_read_only_channel': 'Cannot Post message, read-only channel',
        'team_access_not_granted': 'The token used is not granted access to the workspace',
        'not_authed': 'No Authentication token provided',
        'invalid_auth': 'Some aspect of Authentication cannot be validated. Request denied',
        'access_denied': 'Access to a resource specified in the request denied',
        'account_inactive': 'Authentication token is for a deleted user',
        'token_revoked': 'Authentication token for a deleted user has been revoked',
        'no_permission': 'The workspace token used does not have necessary permission to send message',
        'ratelimited': 'The request has been ratelimited. Retry sending message later',
        'service_unavailable': 'The service is temporarily unavailable',
        'fatal_error': 'The server encountered catastrophic error while sending message',
        'internal_error': 'The server could not complete operation, likely due to transient issue',
        'request_timeout': 'Sending message error via POST: either message was missing or truncated',
    }
    try:
        handle.chat_postMessage(
            channel=channel,
            text=message)
        return f"Successfully Sent Message on Channel: #{channel}"
    except SlackApiError as e:
        error_code = e.response['error']
        pp.pprint(
            f"Failed sending message to slack channel {channel}, Error: {error_code}")
        detail = slack_error_details.get(error_code)
        if detail is not None:
            raise Exception(detail) from e
        raise Exception(f'Failed Sending Message to slack channel {channel} Error: {error_code}') from e
    except Exception as e:
        print("\n\n")
        pp.pprint(
            f"Failed sending message to slack channel {channel}, Error: {str(e)}")
        return f"Unable to send message on {channel}"
================================================
FILE: Slack/legos/slack_send_DM/README.md
================================================
# Slack Send DM
## Try it Out
You can try this Action in the unSkript [Free Trial](https://us.app.unskript.io/), or using the [open source Docker image](http://runbooks.sh).
================================================
FILE: Slack/legos/slack_send_DM/__init__.py
================================================
================================================
FILE: Slack/legos/slack_send_DM/slack_send_DM.json
================================================
{
"action_title": "Slack Send DM",
"action_description": "Given a list of Slack IDs, this Action will create a DM (one user) or group chat (multiple users), and send a message to the chat",
"action_type": "LEGO_TYPE_SLACK",
"action_entry_function": "slack_send_DM",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_is_check": false,
"action_supports_iteration": true,
"action_supports_poll": true
}
================================================
FILE: Slack/legos/slack_send_DM/slack_send_DM.py
================================================
from __future__ import annotations
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from pydantic import BaseModel, Field
from slack_sdk import WebClient
from typing import Dict, List
## note: Your Slack App will need the:
#im:write (for DM)
#mpim:write scope (for group IM messages).
# Your Bot will also need to be a member of the channel
class InputSchema(BaseModel):
    """Input parameters for the slack_send_DM Action."""
    # Slack user IDs to include in the DM (one) or group chat (several).
    users: List = Field(..., description='List of users to DM', title='users')
    # Message text to send to the opened conversation.
    message: str = Field(..., description='Message to DM to users.', title='message')

# Shared pretty-printer used for human-readable diagnostics below.
pp = pprint.PrettyPrinter(indent=2)
def slack_send_DM_printer(output):
    """Pretty-print the DM result returned by slack_send_DM; skips None."""
    if output is None:
        return
    pprint.pprint(output)
def slack_send_DM(
        handle: WebClient,
        users: list,
        message:str) -> Dict:
    """Open a DM (one user) or group chat (several users) and send a message.

    Requires the im:write (DM) and mpim:write (group) scopes.

    :type handle: WebClient
    :param handle: Authenticated slack_sdk WebClient.

    :type users: list
    :param users: Slack user IDs to include in the conversation.

    :type message: str
    :param message: Text to send.

    :rtype: Dict describing the posted message on success; an error string
            otherwise.
    """
    # Slack's conversations.open takes multiple users as a comma separated string.
    comma_separated_users = ', '.join(str(user) for user in users)
    try:
        # Open (or reuse) the DM / group conversation and grab its channel ID.
        createDM = handle.conversations_open(users=comma_separated_users)
        channel_id = createDM['channel']['id']
        # Post the message into the opened conversation.
        message_response = handle.chat_postMessage(channel=channel_id, text=message)
        return message_response['message']
    except Exception as e:
        print("\n\n")
        # pprint.pprint renders a short string identically to the module's
        # PrettyPrinter; calling it directly keeps this block self-contained.
        pprint.pprint(
            f"Failed sending message to slack channel, Error: {str(e)}")
        # Fixed: dropped the pointless f-prefix and trailing space.
        return "Unable to send message"
================================================
FILE: Snowflake/README.md
================================================
# Snowflake Actions
* [Snowflake Read Query](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Snowflake/legos/snowflake_read_query/README.md): Snowflake Read Query
* [Snowflake Write Query](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Snowflake/legos/snowflake_write_query/README.md): Snowflake Write Query
================================================
FILE: Snowflake/__init__.py
================================================
================================================
FILE: Snowflake/legos/__init__.py
================================================
================================================
FILE: Snowflake/legos/snowflake_read_query/README.md
================================================
[
## See it in Action
You can see this Lego in action by following this link: [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Snowflake/legos/snowflake_read_query/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Snowflake/legos/snowflake_read_query/snowflake_read_query.json
================================================
{
"action_title": "Snowflake Read Query",
"action_description": "Snowflake Read Query",
"action_type": "LEGO_TYPE_SNOWFLAKE",
"action_entry_function": "snowflake_read_query",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SNOWFLAKE"]
}
================================================
FILE: Snowflake/legos/snowflake_read_query/snowflake_read_query.py
================================================
import pprint
from typing import List
from pydantic import BaseModel, Field
# Shared pretty-printer used for human-readable diagnostics below.
pp = pprint.PrettyPrinter(indent=4)

class InputSchema(BaseModel):
    """Input parameters for the snowflake_read_query Action."""
    # SQL query whose rows will be fetched and returned.
    query: str = Field(
        title='Query',
        description='Query to read data')
    # Database selected via "USE DATABASE" before the query runs.
    db_name: str = Field(
        title='Database name',
        description='Name of the database to use')
    # Schema selected via "USE SCHEMA" before the query runs.
    schema_name: str = Field(
        title='Schema name',
        description='Name of the Schema to use')
def snowflake_read_query_printer(output):
    """Pretty-print the fetched rows; does nothing for a None result."""
    if output is not None:
        pprint.pprint(output)
def snowflake_read_query(handle, query: str, db_name: str, schema_name: str) -> List:
    """snowflake_read_query Runs query with the provided parameters.

    :type handle: object
    :param handle: Snowflake connection object returned from task.validate(...).

    :type query: str
    :param query: Query to read data.

    :type db_name: str
    :param db_name: Name of the database to use.

    :type schema_name: str
    :param schema_name: Name of the Schema to use.

    :rtype: List of fetched rows if success. Exception on error.
    """
    # NOTE(review): db_name/schema_name are concatenated into SQL; Snowflake
    # does not allow binding identifiers in USE statements, so callers must
    # pass trusted identifiers only.
    cur = handle.cursor()
    try:
        # Select the database/schema context before running the query.
        cur.execute("USE DATABASE " + db_name)
        cur.execute("USE SCHEMA " + schema_name)
        cur.execute(query)
        res = cur.fetchall()
    finally:
        # Fixed: the cursor previously leaked if any execute/fetch raised.
        cur.close()
    handle.close()
    return res
================================================
FILE: Snowflake/legos/snowflake_write_query/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Snowflake/legos/snowflake_write_query/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Snowflake/legos/snowflake_write_query/snowflake_write_query.json
================================================
{
"action_title": "Snowflake Write Query",
"action_description": "Snowflake Write Query",
"action_type": "LEGO_TYPE_SNOWFLAKE",
"action_entry_function": "snowflake_write_query",
"action_needs_credential": true,
"action_supports_poll": true,
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SNOWFLAKE"]
}
================================================
FILE: Snowflake/legos/snowflake_write_query/snowflake_write_query.py
================================================
import pprint
from typing import Dict
from pydantic import BaseModel, Field
# Shared pretty-printer used for human-readable diagnostics below.
pp = pprint.PrettyPrinter(indent=4)

class InputSchema(BaseModel):
    """Input parameters for the snowflake_write_query Action."""
    # SQL statement that writes data.
    query: str = Field(
        title='Query',
        description='Query to write data')
    # Database selected via "USE DATABASE" before the query runs.
    db_name: str = Field(
        title='Database name',
        description='Name of the database to use')
    # Schema selected via "USE SCHEMA" before the query runs.
    schema_name: str = Field(
        title='Schema name',
        description='Name of the Schema to use')
def snowflake_write_query_printer(output):
    """Pretty-print the write-query status; does nothing for a None result."""
    if output is not None:
        pprint.pprint(output)
def snowflake_write_query(handle, query: str, db_name: str, schema_name: str) -> Dict:
    """snowflake_write_query Runs query with the provided parameters.

    :type handle: object
    :param handle: Snowflake connection object returned from task.validate(...).

    :type query: str
    :param query: Query that writes data.

    :type db_name: str
    :param db_name: Name of the database to use.

    :type schema_name: str
    :param schema_name: Name of the Schema to use.

    :rtype: Dict with a status message if success. Exception on error.
    """
    # NOTE(review): db_name/schema_name are concatenated into SQL; Snowflake
    # does not allow binding identifiers in USE statements, so callers must
    # pass trusted identifiers only.
    result = {}
    cur = handle.cursor()
    try:
        # Select the database/schema context before running the query.
        cur.execute("USE DATABASE " + db_name)
        cur.execute("USE SCHEMA " + schema_name)
        cur.execute(query)
        result["Result"] = "The query executed successfully!"
    finally:
        # Fixed: the cursor previously leaked if any execute raised.
        cur.close()
    handle.close()
    return result
================================================
FILE: Splunk/README.md
================================================
# Splunk Actions
* [Get Splunk SDK Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Splunk/legos/splunk_get_handle/README.md): Get Splunk SDK Handle
================================================
FILE: Splunk/__init__.py
================================================
================================================
FILE: Splunk/legos/__init__.py
================================================
================================================
FILE: Splunk/legos/splunk_get_handle/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Splunk/legos/splunk_get_handle/__init__.py
================================================
================================================
FILE: Splunk/legos/splunk_get_handle/splunk_get_handle.json
================================================
{
"action_title": "Get Splunk SDK Handle",
"action_description": "Get Splunk SDK Handle",
"action_type": "LEGO_TYPE_SPLUNK",
"action_entry_function": "splunk_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_output_type": "ACTION_OUTPUT_TYPE_NONE",
"action_supports_iteration": false,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_SPLUNK"]
}
================================================
FILE: Splunk/legos/splunk_get_handle/splunk_get_handle.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    """splunk_get_handle takes no input parameters."""
    pass
def splunk_get_handle(handle):
    """Return the Splunk SDK handle unchanged.

    :type handle: object
    :param handle: Splunk handle produced during credential validation.

    :rtype: splunk handle.
    """
    return handle
================================================
FILE: Stripe/README.md
================================================
# Stripe Actions
* [ Capture a Charge](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_capture_charge/README.md): Capture the payment of an existing, uncaptured, charge
* [Close Dispute](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_close_dispute/README.md): Close Dispute
* [Create a Charge](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_create_charge/README.md): Create a Charge
* [Create a Refund](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_create_refund/README.md): Create a Refund
* [Get list of charges previously created](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_get_all_charges/README.md): Get list of charges previously created
* [Get list of disputes](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_get_all_disputes/README.md): Get list of disputes
* [Get list of refunds](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_get_all_refunds/README.md): Get list of refunds for the given threshold.
* [Get Stripe Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_get_handle/README.md): Get Stripe Handle
* [Retrieve a Charge](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_retrieve_charge/README.md): Retrieve a Charge
* [Retrieve details of a dispute](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_retrieve_dispute/README.md): Retrieve details of a dispute
* [Retrieve a refund](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_retrieve_refund/README.md): Retrieve a refund
* [Update a Charge](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_update_charge/README.md): Update a Charge
* [Update Dispute](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_update_dispute/README.md): Update Dispute
* [Update Refund](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Stripe/legos/stripe_update_refund/README.md): Updates the specified refund by setting the values of the parameters passed.
================================================
FILE: Stripe/__init__.py
================================================
================================================
FILE: Stripe/legos/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_capture_charge/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_capture_charge/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_capture_charge/stripe_capture_charge.json
================================================
{
"action_title": " Capture a Charge",
"action_description": " Capture the payment of an existing, uncaptured, charge",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_capture_charge",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_CHARGE"]
}
================================================
FILE: Stripe/legos/stripe_capture_charge/stripe_capture_charge.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for the stripe_capture_charge Action."""
    # Identifier of the existing, uncaptured charge to capture.
    charge_id: str = Field(
        title='Charge Id',
        description='Capture the payment of an existing, uncaptured, charge'
    )
def stripe_capture_charge_printer(output):
    """Pretty-print the output of stripe_capture_charge.

    Fixed: the original isinstance branches (list/tuple, dict, other) all
    performed the identical pprint call, so they were collapsed into one.
    """
    pprint.pprint(output)
def stripe_capture_charge(handle, charge_id:str) -> List:
    """stripe_capture_charge Capture the payment of an existing, uncaptured, charge.

    :type handle: object
    :param handle: Stripe SDK handle returned from task.validate(...).

    :type charge_id: string
    :param charge_id: Identifier of the uncaptured charge to capture.

    :rtype: List containing the captured charge on success; None on error.
    """
    try:
        captured = handle.Charge.capture(charge_id)
    except Exception as err:
        # Best-effort: report the failure and signal it with None.
        pprint.pprint(err)
        return None
    return [captured]
================================================
FILE: Stripe/legos/stripe_close_dispute/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_close_dispute/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_close_dispute/stripe_close_dispute.json
================================================
{
"action_title": "Close Dispute",
"action_description": "Close Dispute",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_close_dispute",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_DISPUTE"]
}
================================================
FILE: Stripe/legos/stripe_close_dispute/stripe_close_dispute.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for the stripe_close_dispute Action."""
    # Identifier of the dispute to close.
    dispute_id: str = Field(
        title='Dispute ID',
        description='Dispute ID'
    )
def stripe_close_dispute_printer(output):
    """Pretty-print the output of stripe_close_dispute.

    Fixed: the original isinstance branches (list/tuple, dict, other) all
    performed the identical pprint call, so they were collapsed into one.
    """
    pprint.pprint(output)
def stripe_close_dispute(handle, dispute_id:str) -> List:
    """stripe_close_dispute Close Dispute

    :type handle: object
    :param handle: Stripe SDK handle returned from task.validate(...).

    :type dispute_id: string
    :param dispute_id: Dispute ID

    :rtype: List containing the closed dispute on success; None on error.
    """
    try:
        closed = handle.Dispute.close(dispute_id)
    except Exception as err:
        # Best-effort: report the failure and signal it with None.
        pprint.pprint(err)
        return None
    return [closed]
================================================
FILE: Stripe/legos/stripe_create_charge/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_create_charge/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_create_charge/stripe_create_charge.json
================================================
{
"action_title": "Create a Charge",
"action_description": "Create a Charge",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_create_charge",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_CHARGE"]
}
================================================
FILE: Stripe/legos/stripe_create_charge/stripe_create_charge.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from typing import Optional, List
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    """Input parameters for the stripe_create_charge Action."""
    # Amount to collect (Stripe amounts are typically in the currency's
    # smallest unit — confirm against the account's currency handling).
    amount: int = Field(
        title='Amount',
        description='Amount intended to be collected by this payment')
    # Defaults to 'usd' when omitted.
    currency: Optional[str] = Field(
        'usd',
        title='Currency',
        description='Three letter ISO currency code, eg: usd, cad, eur')
    # Payment source identifier passed through to Stripe.
    source: Optional[str] = Field(
        title='Payment Source',
        description='A payment source to be charged. eg. credit card ID, bank account, token')
    # Free-text reason recorded on the charge.
    description: Optional[str] = Field(
        title='Description',
        description='Reason for the Charge. Small Description about charge.')
def stripe_create_charge_printer(output):
    """Render the created charge rows as a table; does nothing for None."""
    if output is None:
        return
    print(tabulate(output, headers=['Amount', 'ID', 'Description']))
def stripe_create_charge(
        handle,
        amount: int,
        source: str = "",
        description: str = "",
        currency: str = "usd"
) -> List:
    """stripe_create_charge Charges a credit card or other payment source to the given amount
       in the given currency.

    :type handle: object
    :param handle: Stripe SDK handle returned from task.validate(...).

    :type amount: int
    :param amount: Amount intended to be collected by this payment.

    :type source: str
    :param source: A payment source to be charged.

    :type description: str
    :param description: Reason for the Charge. Small Description about charge.

    :type currency: str
    :param currency: Three letter ISO currency code, eg: usd, cad, eur

    :rtype: List of [amount, id, description] rows for the created charge;
            empty list on error.
    """
    try:
        charge = handle.Charge.create(
            amount=amount,
            currency=currency,
            source=source,
            description=description)
    except Exception:
        # Best-effort: report the failure and fall through to an empty result.
        print('Error occurred when Creating a charge')
        return []
    return [[str(charge['amount']), charge['id'], charge['description']]]
================================================
FILE: Stripe/legos/stripe_create_customer/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_create_customer/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_create_customer/stripe_create_customer.json
================================================
{
"action_title": "Create a customer",
"action_description": "Create a customer",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_create_customer",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_CUSTOMER"]
}
================================================
FILE: Stripe/legos/stripe_create_customer/stripe_create_customer.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for the stripe_create_customer Action."""
    # Keyword arguments forwarded verbatim to stripe Customer.create.
    params: dict = Field(
        title='Customer Data',
        description='Params in key=value form.'
    )
def stripe_create_customer_printer(output):
    """Pretty-print the output of stripe_create_customer.

    Fixed: the original isinstance branches (list/tuple, dict, other) all
    performed the identical pprint call, so they were collapsed into one.
    """
    pprint.pprint(output)
def stripe_create_customer(handle, params:dict) -> List:
    """stripe_create_customer Create a customer

    :type handle: object
    :param handle: Stripe SDK handle returned from task.validate(...).

    :type params: dict
    :param params: Params in key=value form.

    :rtype: List containing the created customer on success; None on error.
    """
    try:
        new_customer = handle.Customer.create(**params)
    except Exception as err:
        # Best-effort: report the failure and signal it with None.
        pprint.pprint(err)
        return None
    return [new_customer]
================================================
FILE: Stripe/legos/stripe_create_refund/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_create_refund/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_create_refund/stripe_create_refund.json
================================================
{
"action_title": "Create a Refund",
"action_description": "Create a Refund",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_create_refund",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_REFUND"]
}
================================================
FILE: Stripe/legos/stripe_create_refund/stripe_create_refund.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Input parameters for the stripe_create_refund Action."""
    # Identifier of the charge to refund.
    charge_id: str = Field(
        title='Charge Id',
        description='The identifier of the charge to refund.'
    )
def stripe_create_refund_printer(output):
    """Pretty-print the output of stripe_create_refund.

    Fixed: the original isinstance branches (list/tuple, dict, other) all
    performed the identical pprint call, so they were collapsed into one.
    """
    pprint.pprint(output)
def stripe_create_refund(handle, charge_id:str) -> List:
    """stripe_create_refund Create a Refund

    :type handle: object
    :param handle: Stripe SDK handle returned from task.validate(...).

    :type charge_id: string
    :param charge_id: The identifier of the charge to refund.

    :rtype: List containing the created refund on success; None on error.
    """
    try:
        new_refund = handle.Refund.create(charge=charge_id)
    except Exception as err:
        # Best-effort: report the failure and signal it with None.
        pprint.pprint(err)
        return None
    return [new_refund]
================================================
FILE: Stripe/legos/stripe_delete_customer/README.md
================================================
[
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_delete_customer/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_delete_customer/stripe_delete_customer.json
================================================
{
"action_title": "Delete customer",
"action_description": "Delete customer",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_delete_customer",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_CUSTOMER"]
}
================================================
FILE: Stripe/legos/stripe_delete_customer/stripe_delete_customer.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Pydantic schema declaring the inputs this action accepts."""
    # Identifier of the customer to delete, passed to handle.Customer.delete.
    customer_id: str = Field(
        title='Customer Id',
        description='Customer Id'
    )
def stripe_delete_customer_printer(output):
    """Pretty-print the result of stripe_delete_customer.

    :param output: Value returned by stripe_delete_customer (list, dict, or other).
    """
    # All three original isinstance branches did the same thing; one call suffices.
    pprint.pprint(output)
def stripe_delete_customer(handle, customer_id: str) -> List:
    """stripe_delete_customer Delete Customer.

    :type customer_id: string
    :param customer_id: Customer Id.

    :rtype: List containing the delete-API response, or None on error.
    """
    try:
        response = handle.Customer.delete(customer_id)
    except Exception as err:
        # Best-effort: surface the error and signal failure with None.
        pprint.pprint(err)
        return None
    return [response]
================================================
FILE: Stripe/legos/stripe_get_all_charges/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_get_all_charges/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_get_all_charges/stripe_get_all_charges.json
================================================
{
"action_title": "Get list of charges previously created",
"action_description": "Get list of charges previously created",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_get_all_charges",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_CHARGE"]
}
================================================
FILE: Stripe/legos/stripe_get_all_charges/stripe_get_all_charges.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
from typing import List
from pydantic import BaseModel, Field
from tabulate import tabulate
class InputSchema(BaseModel):
    """Pydantic schema declaring the inputs this action accepts."""
    # Upper bound on charges fetched; 0 triggers full auto-pagination in the action.
    max_results: int = Field(
        title='Maximum Results',
        description='Threshold to get maximum result.'
    )
def stripe_get_all_charges_printer(output):
    """Render the charge rows as a text table with Amount/ID/Description headers."""
    if output is None:
        return
    print(tabulate(output, headers=['Amount', 'ID', 'Description']))
def stripe_get_all_charges(handle, max_results: int = 25) -> List:
    """stripe_get_all_charges Returns a list of charges that was previously created.
    The charges are returned in sorted order, with the most recent charges
    appearing first.

    :type max_results: int
    :param max_results: Threshold to get maximum result; 0 fetches every charge
        via Stripe's auto-pagination.

    :rtype: List of [amount, id, description] rows.
    """
    result = []
    try:
        if max_results == 0:
            # auto_paging_iter walks every page of results.
            charges = handle.Charge.list().auto_paging_iter()
        else:
            charges = handle.Charge.list(limit=max_results)
        # Single loop body replaces the two identical loops in the original.
        for charge in charges:
            result.append([charge['amount'], charge['id'], charge['description']])
    except Exception as e:
        # Best-effort: report the error and return whatever was collected.
        print(e)
    return result
================================================
FILE: Stripe/legos/stripe_get_all_customers/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_get_all_customers/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_get_all_customers/stripe_get_all_customers.json
================================================
{
"action_title": "Get list of customers",
"action_description": "Get list of customers",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_get_all_customers",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_CUSTOMER"]
}
================================================
FILE: Stripe/legos/stripe_get_all_customers/stripe_get_all_customers.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Pydantic schema declaring the inputs this action accepts."""
    # Upper bound on customers fetched; 0 triggers full auto-pagination in the action.
    max_results: int = Field(
        title='Maximum Results',
        description='Threshold to get maximum result.'
    )
def stripe_get_all_customers_printer(output):
    """Pretty-print the result of stripe_get_all_customers.

    :param output: Value returned by stripe_get_all_customers (list, dict, or other).
    """
    # All three original isinstance branches did the same thing; one call suffices.
    pprint.pprint(output)
def stripe_get_all_customers(handle, max_results: int = 25) -> List:
    """stripe_get_all_customers Returns a list of previously created customers.

    :type max_results: int
    :param max_results: Threshold to get maximum result; 0 fetches every
        customer via auto-pagination.

    :rtype: List of customer objects.
    """
    customers = []
    try:
        if max_results == 0:
            # 100 per page; auto_paging_iter walks all pages.
            listing = handle.Customer.list(limit=100)
            customers.extend(listing.auto_paging_iter())
        else:
            listing = handle.Customer.list(limit=max_results)
            customers = listing["data"]
    except Exception as err:
        # Best-effort: print the error and return what was gathered so far.
        print(err)
    return customers
================================================
FILE: Stripe/legos/stripe_get_all_disputes/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_get_all_disputes/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_get_all_disputes/stripe_get_all_disputes.json
================================================
{
"action_title": "Get list of disputes",
"action_description": "Get list of disputes",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_get_all_disputes",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_DISPUTE"]
}
================================================
FILE: Stripe/legos/stripe_get_all_disputes/stripe_get_all_disputes.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Pydantic schema declaring the inputs this action accepts."""
    # Upper bound on disputes fetched; 0 triggers full auto-pagination in the action.
    max_results: int = Field(
        title='Maximum Results',
        description='Threshold to get maximum result.'
    )
def stripe_get_all_disputes_printer(output):
    """Pretty-print the result of stripe_get_all_disputes.

    :param output: Value returned by stripe_get_all_disputes (list, dict, or other).
    """
    # All three original isinstance branches did the same thing; one call suffices.
    pprint.pprint(output)
def stripe_get_all_disputes(handle, max_results: int = 25) -> List:
    """stripe_get_all_disputes Returns a list of previously created disputes.

    :type max_results: int
    :param max_results: Threshold to get maximum result; 0 fetches every
        dispute via auto-pagination.

    :rtype: List of dispute objects.
    """
    disputes = []
    try:
        if max_results == 0:
            listing = handle.Dispute.list()
            disputes.extend(listing.auto_paging_iter())
        else:
            listing = handle.Dispute.list(limit=max_results)
            disputes = listing["data"]
    except Exception as err:
        # Best-effort: print the error and return what was gathered so far.
        print(err)
    return disputes
================================================
FILE: Stripe/legos/stripe_get_all_refunds/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_get_all_refunds/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_get_all_refunds/stripe_get_all_refunds.json
================================================
{
"action_title": "Get list of refunds",
"action_description": " Get list of refunds for the given threshold.",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_get_all_refunds",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_REFUND"]
}
================================================
FILE: Stripe/legos/stripe_get_all_refunds/stripe_get_all_refunds.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Pydantic schema declaring the inputs this action accepts."""
    # Upper bound on refunds fetched; 0 triggers full auto-pagination in the action.
    max_results: int = Field(
        title='Maximum Results',
        description='Threshold to get maximum result.'
    )
def stripe_get_all_refunds_printer(output):
    """Pretty-print the result of stripe_get_all_refunds.

    :param output: Value returned by stripe_get_all_refunds (list, dict, or other).
    """
    # All three original isinstance branches did the same thing; one call suffices.
    pprint.pprint(output)
def stripe_get_all_refunds(handle, max_results: int = 25) -> List:
    """stripe_get_all_refunds Returns a list of refunds that was previously
    created, with the most recent refunds appearing first.

    :type max_results: int
    :param max_results: Threshold to get maximum result; 0 fetches every
        refund via Stripe's auto-pagination.

    :rtype: List of refund objects.
    """
    result = []
    # Unlike its sibling legos, the original had no error handling and would
    # crash on any API failure; wrap in try/except for consistency with them.
    try:
        if max_results == 0:
            refunds = handle.Refund.list().auto_paging_iter()
        else:
            refunds = handle.Refund.list(limit=max_results)
        for refund in refunds:
            result.append(refund)
    except Exception as e:
        # Best-effort: report the error and return whatever was collected.
        print(e)
    return result
================================================
FILE: Stripe/legos/stripe_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_get_handle/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_get_handle/stripe_get_handle.json
================================================
{
"action_title": "Get Stripe Handle",
"action_description": "Get Stripe Handle",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false
}
================================================
FILE: Stripe/legos/stripe_get_handle/stripe_get_handle.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
from pydantic import BaseModel
class InputSchema(BaseModel):
    """This action takes no inputs; the handle is supplied by the runtime."""
    pass
def stripe_get_handle(handle):
    """stripe_get_handle returns the Stripe handle.

    :param handle: Stripe handle injected by the unSkript runtime.
    :rtype: Stripe Handle
    """
    # Identity passthrough: exposes the runtime-provided handle to the runbook.
    return handle
================================================
FILE: Stripe/legos/stripe_retrieve_charge/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_retrieve_charge/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_retrieve_charge/stripe_retrieve_charge.json
================================================
{
"action_title": "Retrieve a Charge",
"action_description": " Retrieve a Charge",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_retrieve_charge",
"action_output_type": "ACTION_OUTPUT_TYPE_DICT",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_CHARGE"]
}
================================================
FILE: Stripe/legos/stripe_retrieve_charge/stripe_retrieve_charge.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Dict
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Pydantic schema declaring the inputs this action accepts."""
    # Identifier of the charge to fetch, passed to handle.Charge.retrieve.
    charge_id: str = Field(
        title='Charge Id',
        description='Charge ID'
    )
def stripe_retrieve_charge_printer(output):
    """Pretty-print the result of stripe_retrieve_charge.

    :param output: Value returned by stripe_retrieve_charge (list, dict, or other).
    """
    # All three original isinstance branches did the same thing; one call suffices.
    pprint.pprint(output)
def stripe_retrieve_charge(handle, charge_id: str) -> Dict:
    """stripe_retrieve_charge Retrieve the Charge for the given ID.

    :type charge_id: string
    :param charge_id: Charge ID.

    :rtype: Dict with the charge object returned by the API.
    """
    # NOTE(review): unlike sibling legos this does not catch API errors;
    # exceptions propagate to the caller (behavior preserved intentionally).
    return handle.Charge.retrieve(charge_id)
================================================
FILE: Stripe/legos/stripe_retrieve_customer/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_retrieve_customer/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_retrieve_customer/stripe_retrieve_customer.json
================================================
{
"action_title": "Retrive details of a customer ",
"action_description": "Retrive details of a customer ",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_retrieve_customer",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_CUSTOMER"]
}
================================================
FILE: Stripe/legos/stripe_retrieve_customer/stripe_retrieve_customer.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Pydantic schema declaring the inputs this action accepts."""
    # Identifier of the customer to fetch, passed to handle.Customer.retrieve.
    customer_id: str = Field(
        title='Customer Id',
        description='Retrive details of a customer'
    )
def stripe_retrieve_customer_printer(output):
    """Pretty-print the result of stripe_retrieve_customer.

    :param output: Value returned by stripe_retrieve_customer (list, dict, or other).
    """
    # All three original isinstance branches did the same thing; one call suffices.
    pprint.pprint(output)
def stripe_retrieve_customer(handle, customer_id: str) -> List:
    """stripe_retrieve_customer Get customer data.

    :type customer_id: string
    :param customer_id: Identifier of the customer to retrieve.

    :rtype: List containing the customer object, or None on error.
    """
    try:
        customer = handle.Customer.retrieve(customer_id)
    except Exception as err:
        # Best-effort: surface the error and signal failure with None.
        pprint.pprint(err)
        return None
    return [customer]
================================================
FILE: Stripe/legos/stripe_retrieve_dispute/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_retrieve_dispute/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_retrieve_dispute/stripe_retrieve_dispute.json
================================================
{
"action_title": "Retrieve details of a dispute",
"action_description": "Retrieve details of a dispute",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_retrieve_dispute",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_DISPUTE"]
}
================================================
FILE: Stripe/legos/stripe_retrieve_dispute/stripe_retrieve_dispute.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Pydantic schema declaring the inputs this action accepts."""
    # Identifier of the dispute to fetch, passed to handle.Dispute.retrieve.
    dispute_id: str = Field(
        title='Dispute Id',
        description='Retrieve details of a dispute'
    )
def stripe_retrieve_dispute_printer(output):
    """Pretty-print the result of stripe_retrieve_dispute.

    :param output: Value returned by stripe_retrieve_dispute (list, dict, or other).
    """
    # All three original isinstance branches did the same thing; one call suffices.
    pprint.pprint(output)
def stripe_retrieve_dispute(handle, dispute_id: str) -> List:
    """stripe_retrieve_dispute Get Dispute data.

    :type dispute_id: string
    :param dispute_id: Identifier of the dispute to retrieve.

    :rtype: List containing the dispute object, or None on error.
    """
    try:
        dispute = handle.Dispute.retrieve(dispute_id)
    except Exception as err:
        # Best-effort: surface the error and signal failure with None.
        pprint.pprint(err)
        return None
    return [dispute]
================================================
FILE: Stripe/legos/stripe_retrieve_refund/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_retrieve_refund/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_retrieve_refund/stripe_retrieve_refund.json
================================================
{
"action_title": "Retrieve a refund",
"action_description": "Retrieve a refund",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_retrieve_refund",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_REFUND"]
}
================================================
FILE: Stripe/legos/stripe_retrieve_refund/stripe_retrieve_refund.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Pydantic schema declaring the inputs this action accepts."""
    # Identifier of the refund to fetch, passed to handle.Refund.retrieve.
    refund_id: str = Field(
        title='Refund Id',
        description='The identifier of the refund.'
    )
def stripe_retrieve_refund_printer(output):
    """Pretty-print the result of stripe_retrieve_refund.

    :param output: Value returned by stripe_retrieve_refund (list, dict, or other).
    """
    # All three original isinstance branches did the same thing; one call suffices.
    pprint.pprint(output)
def stripe_retrieve_refund(handle, refund_id: str) -> List:
    """stripe_retrieve_refund Retrieve a refund.

    :type refund_id: string
    :param refund_id: The identifier of the refund.

    :rtype: List containing the refund object, or None on error.
    """
    try:
        refund = handle.Refund.retrieve(refund_id)
    except Exception as err:
        # Best-effort: surface the error and signal failure with None.
        pprint.pprint(err)
        return None
    return [refund]
================================================
FILE: Stripe/legos/stripe_update_charge/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_update_charge/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_update_charge/stripe_update_charge.json
================================================
{
"action_title": "Update a Charge",
"action_description": "Update a Charge",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_update_charge",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_CHARGE"]
}
================================================
FILE: Stripe/legos/stripe_update_charge/stripe_update_charge.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the 'Update a Charge' action; fields with a None default are optional."""
    # Identifier of the charge to modify.
    charge_id: str = Field(
        title='Charge Id',
        description='Charge ID'
    )
    customer: str = Field(
        title='Customer Id',
        description='Customer ID'
    )
    description: str = Field(
        title='Description',
        description='Description'
    )
    receipt_email: str = Field(
        title='Email address ',
        description='This is the email address that the receipt for this charge will be sent to'
    )
    metadata: dict = Field(
        None,
        title='Metadata',
        description='This can be useful for storing additional information about \
        the object in a structured format. For Eg. {"order_id": "6735"}'
    )
    shipping: dict = Field(
        None,
        title='Shipping Details',
        description='Shipping information for the charge. Helps prevent fraud on \
        charges for physical goods.'
    )
    fraud_details: dict = Field(
        None,
        title='Fraud Details',
        description='A set of key-value pairs you can attach to a charge giving \
        information about its riskiness'
    )
    transfer_group: str = Field(
        None,
        title='Transfer Group',
        description='A string that identifies this transaction as part of a group.'
    )
def stripe_update_charge_printer(output):
    """Pretty-print the result of stripe_update_charge.

    :param output: Value returned by stripe_update_charge (list, dict, or other).
    """
    # All three original isinstance branches did the same thing; one call suffices.
    pprint.pprint(output)
def stripe_update_charge(
        handle,
        charge_id: str,
        customer: str,
        description: str,
        receipt_email: str,
        metadata: dict = None,
        shipping: dict = None,
        fraud_details: dict = None,
        transfer_group: str = "") -> List:
    """stripe_update_charge Updates the specified charge by setting the values of
    the parameters passed. Any parameters not provided will be left unchanged.

    :type charge_id: string
    :param charge_id: Charge ID.

    :type customer: string
    :param customer: Customer ID.

    :type description: string
    :param description: Description.

    :type receipt_email: string
    :param receipt_email: Email address the receipt for this charge will be sent to.

    :type metadata: dict
    :param metadata: Extra structured information to store on the charge.

    :type shipping: dict
    :param shipping: Shipping information for the charge.

    :type fraud_details: dict
    :param fraud_details: Key-value pairs giving information about riskiness.

    :type transfer_group: string
    :param transfer_group: Identifies this transaction as part of a group.

    :rtype: List containing the updated charge object, or None on error.
    """
    # Falsy values are mapped to None (metadata to {}) exactly as the
    # original inline conditionals did.
    fields = {
        'customer': customer if customer else None,
        'description': description if description else None,
        'metadata': metadata if metadata else {},
        'receipt_email': receipt_email if receipt_email else None,
        'shipping': shipping if shipping else None,
        'fraud_details': fraud_details if fraud_details else None,
        'transfer_group': transfer_group if transfer_group else None,
    }
    try:
        updated = handle.Charge.modify(charge_id, **fields)
    except Exception as err:
        # Best-effort: surface the error and signal failure with None.
        pprint.pprint(err)
        return None
    return [updated]
================================================
FILE: Stripe/legos/stripe_update_customer/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_update_customer/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_update_customer/stripe_update_customer.json
================================================
{
"action_title": "Update Customers",
"action_description": "Update Customers",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_update_customer",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_CUSTOMER"]
}
================================================
FILE: Stripe/legos/stripe_update_customer/stripe_update_customer.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the 'Update Customers' action; fields with a None default are optional."""
    # Identifier of the customer to modify.
    customer_id: str = Field(
        title='Customer Id',
        description='Customer ID'
    )
    name: str = Field(
        title='The customer’s full name or business name.',
        description='The customer’s full name or business name.'
    )
    phone: str = Field(
        title='The customer’s phone number.',
        description='The customer’s phone number.'
    )
    description: str = Field(
        title='Description',
        description='Description'
    )
    email: str = Field(
        title='Email address ',
        description='Customer’s email address'
    )
    balance: int = Field(
        title='Current Balance',
        description='Current Balance'
    )
    metadata: dict = Field(
        None,
        title='Metadata',
        description='This can be useful for storing additional information about the object \
        in a structured format. For Eg. {"order_id": "6735"}'
    )
    shipping: dict = Field(
        None,
        title='Shipping Details',
        description='Shipping information for the customer.'
    )
    address: dict = Field(
        None,
        title='The customer’s address.',
        description='The customer’s address.'
    )
def stripe_update_customer_printer(output):
    """Pretty-print the result of stripe_update_customer.

    :param output: Value returned by stripe_update_customer (list, dict, or other).
    """
    # All three original isinstance branches did the same thing; one call suffices.
    pprint.pprint(output)
def stripe_update_customer(
        handle,
        customer_id: str,
        name: str,
        phone: str,
        description: str,
        email: str,
        balance: int,
        metadata: dict,
        shipping: dict,
        address: dict) -> List:
    """stripe_update_customer Update a customer.

    :type customer_id: string
    :param customer_id: Customer ID.

    :type name: string
    :param name: The customer’s full name or business name.

    :type phone: string
    :param phone: The customer’s phone number.

    :type description: string
    :param description: Description.

    :type email: string
    :param email: Customer’s email address.

    :type balance: int
    :param balance: Current Balance.

    :type metadata: dict
    :param metadata: Extra structured information to store on the customer.

    :type shipping: dict
    :param shipping: Shipping information for the customer.

    :type address: dict
    :param address: The customer’s address.

    :rtype: List containing the updated customer object, or None on error.
    """
    # Falsy values are mapped to None ({} for metadata/address), mirroring the
    # original inline conditionals. NOTE(review): a balance of 0 is therefore
    # sent as None (i.e. "unchanged") — confirm this is intended.
    fields = {
        'name': name if name else None,
        'phone': phone if phone else None,
        'description': description if description else None,
        'balance': balance if balance else None,
        'email': email if email else None,
        'metadata': metadata if metadata else {},
        'address': address if address else {},
        'shipping': shipping if shipping else None,
    }
    try:
        updated = handle.Customer.modify(customer_id, **fields)
    except Exception as err:
        # Best-effort: surface the error and signal failure with None.
        pprint.pprint(err)
        return None
    return [updated]
================================================
FILE: Stripe/legos/stripe_update_dispute/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_update_dispute/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_update_dispute/stripe_update_dispute.json
================================================
{
"action_title": "Update Dispute",
"action_description": "Update Dispute",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_update_dispute",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_DISPUTE"]
}
================================================
FILE: Stripe/legos/stripe_update_dispute/stripe_update_dispute.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    """Inputs for the 'Update Dispute' action; fields with a default are optional."""
    # Identifier of the dispute to modify.
    dispute_id: str = Field(
        title='Dispute Id',
        description='Dispute Id'
    )
    submit: bool = Field(
        False,
        title='Submit',
        description='Whether to immediately submit evidence to the bank.'
    )
    metadata: dict = Field(
        None,
        title='Metadata',
        description='This can be useful for storing additional information about the \
        object in a structured format. For Eg. {"order_id": "6735"}'
    )
    evidence: dict = Field(
        None,
        title='Evidence',
        description='Evidence to upload, to respond to a dispute.'
    )
def stripe_update_dispute_printer(output):
    """Pretty-print the result of stripe_update_dispute.

    :param output: Value returned by stripe_update_dispute (list, dict, or other).
    """
    # All three original isinstance branches did the same thing; one call suffices.
    pprint.pprint(output)
def stripe_update_dispute(handle,
                          dispute_id: str,
                          submit: bool = False,
                          metadata=None,
                          evidence=None) -> List:
    """stripe_update_dispute Update a Dispute.

    :type dispute_id: string
    :param dispute_id: Dispute Id.

    :type submit: bool
    :param submit: Whether to immediately submit evidence to the bank.

    :type metadata: dict
    :param metadata: Extra structured information to store on the dispute.

    :type evidence: dict
    :param evidence: Evidence to upload, to respond to a dispute.

    :rtype: List containing the updated dispute object, or None on error.
    """
    # The original normalized metadata/evidence from None to {} and then
    # re-applied the same falsy-to-{} conditional in the call; the inline
    # conditionals alone produce identical arguments, so the pre-pass is gone.
    result = []
    try:
        dispute = handle.Dispute.modify(
            dispute_id,
            # A False submit is sent as None, matching the original behavior.
            submit=submit if submit else None,
            metadata=metadata if metadata else {},
            evidence=evidence if evidence else {},
        )
        result.append(dispute)
        return result
    except Exception as e:
        pprint.pprint(e)
        return None
================================================
FILE: Stripe/legos/stripe_update_refund/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Stripe/legos/stripe_update_refund/__init__.py
================================================
================================================
FILE: Stripe/legos/stripe_update_refund/stripe_update_refund.json
================================================
{
"action_title": "Update Refund",
"action_description": "Updates the specified refund by setting the values of the parameters passed.",
"action_type": "LEGO_TYPE_STRIPE",
"action_entry_function": "stripe_update_refund",
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_STRIPE","CATEGORY_TYPE_STRIPE_REFUND"]
}
================================================
FILE: Stripe/legos/stripe_update_refund/stripe_update_refund.py
================================================
##
## Copyright (c) 2021 unSkript, Inc
## All rights reserved.
##
import pprint
from typing import List
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # ID of the Stripe refund to update (e.g. "re_...").
    refund_id: str = Field(
        title='Refund Id',
        description='Refund Id'
    )
    # Metadata key-value pairs to set on the refund; Stripe merges these into
    # the refund object's metadata.
    metadata: dict = Field(
        title='Metadata',
        description='''
Updates the specified refund by setting the values of the parameters passed.
For Eg. {"order_id": "6735"}'
'''
    )
def stripe_update_refund_printer(output):
    """Pretty-print the output of stripe_update_refund.

    The previous version branched on list/tuple/dict/other, but every branch
    performed the identical pprint.pprint(output) call, so a single
    unconditional call is equivalent.

    :param output: Value returned by stripe_update_refund (list or None).
    """
    pprint.pprint(output)
def stripe_update_refund(handle, refund_id: str, metadata: dict) -> List:
    """stripe_update_refund updates the specified refund by setting the
    values of the parameters passed.

    :type handle: object
    :param handle: Stripe SDK handle returned from task.validate(...).

    :type refund_id: string
    :param refund_id: Refund Id

    :type metadata: dict
    :param metadata: Key-value pairs to set on the refund.

    :rtype: List with response from the describe API, or None on error.
    """
    try:
        updated_refund = handle.Refund.modify(
            refund_id,
            metadata=metadata,
        )
    except Exception as err:
        # Best-effort: report the failure and signal it with None.
        pprint.pprint(err)
        return None
    return [updated_refund]
================================================
FILE: Terraform/README.md
================================================
# Terraform Actions
* [Execute Terraform Command](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Terraform/legos/terraform_exec_command/README.md): Execute Terraform Command
* [Get terraform handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Terraform/legos/terraform_get_handle/README.md): Get terraform handle
================================================
FILE: Terraform/__init__.py
================================================
================================================
FILE: Terraform/legos/__init__.py
================================================
================================================
FILE: Terraform/legos/terraform_exec_command/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Terraform/legos/terraform_exec_command/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Terraform/legos/terraform_exec_command/terraform_exec_command.json
================================================
{
"action_title": "Execute Terraform Command",
"action_description": "Execute Terraform Command",
"action_type": "LEGO_TYPE_TERRAFORM",
"action_version": "2.0.0",
"action_entry_function": "terraform_exec_command",
"action_needs_credential": true,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_TERRAFORM"]
}
================================================
FILE: Terraform/legos/terraform_exec_command/terraform_exec_command.py
================================================
#
# Copyright (c) 2022 unSkript.com
# All rights reserved.
#
import json
from typing import Optional
from pydantic import BaseModel, Field
class InputSchema(BaseModel):
    # Git URL of the repository holding the Terraform scripts.
    repo: str = Field(
        title='Git Repository',
        description='Repository that has Terraform Scripts eg: https://github.com/acme/acme.git'
    )
    # Branch of that repository to check out before running terraform.
    branch: str = Field(
        title='Git Repository Branch',
        description='Branch name of repository that has Terraform Scripts \
eg: master, dev, feature/multiuser'
    )
    # Sub-directory inside the repository to run terraform from; the entry
    # function defaults this to "./" when unset.
    dir_path: Optional[str] = Field(
        title='Directory Path',
        description='Directory within Repository to run the terraform command \
eg: acme, ./, acme/terrform/main'
    )
    # Full terraform command line to execute (must start with "terraform").
    command: str = Field(
        title='Terraform Command',
        description='Terraform Command to Execute eg: terraform init, terraform \
apply -var="instance_type=t3.micro"'
    )
def terraform_exec_command(handle, repo, branch, command, dir_path: str = None) -> str:
    """terraform_exec_command executes the terraform command with any
    arguments, via the sidecar.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type repo: str
    :param repo: Repository that has Terraform Scripts.

    :type branch: str
    :param branch: Branch of the repository to use.

    :type command: str
    :param command: Terraform Command to Execute.

    :type dir_path: str
    :param dir_path: Directory within Repository to run the terraform command.

    :rtype: Str Output of the command, or an error description on failure.
    """
    assert command.startswith("terraform")
    print('WARNING: Please note terraform apply and terraform destroy will be run with \
-auto-approve for non-interactive run')
    # The default lives here rather than in InputSchema because empty-string
    # defaults do not survive the drag-and-drop flow in Jupyter.
    target_dir = "./" if dir_path is None else dir_path
    try:
        response = handle.sidecar_command(
            repo,
            branch,
            handle.credential_id,
            target_dir,
            command,
            str("")
        )
        decoded = response.data.decode('utf-8')
        return json.loads(decoded)['output']
    except Exception as e:
        return f"Execution was not successful {e}"
================================================
FILE: Terraform/legos/terraform_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Terraform/legos/terraform_get_handle/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Terraform/legos/terraform_get_handle/terraform_get_handle.json
================================================
{
"action_title": "Get terraform handle",
"action_description": "Get terraform handle",
"action_type": "LEGO_TYPE_TERRAFORM",
"action_entry_function": "terraform_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false
}
================================================
FILE: Terraform/legos/terraform_get_handle/terraform_get_handle.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No user-supplied inputs: the credentialed handle is injected by the
    # framework via task.validate(...).
    pass
def terraform_get_handle(handle):
    """
    terraform_get_handle returns the terraform handle.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: terraform handle.
    """
    # The handle is already validated upstream; hand it straight back.
    validated_handle = handle
    return validated_handle
================================================
FILE: Vault/__init__.py
================================================
================================================
FILE: Vault/legos/__init__.py
================================================
================================================
FILE: Vault/legos/vault_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Vault/legos/vault_get_service_health/__init__.py
================================================
================================================
FILE: Vault/legos/vault_get_service_health/vault_get_service_health.json
================================================
{
"action_title": "Get Vault service health",
"action_description": "Fetches the health of the Vault service by using hvac's sys/health call.",
"action_type": "LEGO_TYPE_VAULT",
"action_entry_function": "vault_get_service_health",
"action_needs_credential": true,
"action_output_type": "ACTION_OUTPUT_TYPE_LIST",
"action_is_check": true,
"action_next_hop": [
""
],
"action_next_hop_parameter_mapping": {},
"action_supports_iteration": true,
"action_supports_poll": true,
"action_categories":["CATEGORY_TYPE_SECOPS","LEGO_TYPE_VAULT"]
}
================================================
FILE: Vault/legos/vault_get_service_health/vault_get_service_health.py
================================================
from typing import Tuple
import hvac
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No user-supplied inputs: the hvac client handle is provided by the
    # framework credential flow.
    pass
def vault_get_service_health_printer(output):
    """Print a human-readable summary of the (is_healthy, errors) tuple
    produced by vault_get_service_health."""
    healthy, error_list = output
    if not healthy:
        print("Vault Service is Unhealthy.")
        if error_list:
            print("\nErrors:")
            for entry in error_list:
                print(f" - {entry}")
        return
    print("Vault Service is Healthy.")
def vault_get_service_health(handle) -> Tuple:
    """
    vault_get_service_health fetches the health of the Vault service by using
    hvac's sys/health call.

    Vault is considered healthy when it is initialized, unsealed, and not a
    standby node; otherwise a list of error dictionaries describing each
    failing condition is returned.

    The previous `except Exception as e: raise e` wrapper added nothing and
    only truncated the traceback, so errors now propagate naturally.

    :type handle: object
    :param handle: Handle containing the Vault instance (hvac client).

    :rtype: Tuple of (bool is_healthy, list of error dicts or None if healthy).
    """
    health_data = handle.sys.read_health_status(method='GET')
    # Health check is successful if Vault is initialized, not in standby, and unsealed
    if health_data["initialized"] and not health_data["standby"] and not health_data["sealed"]:
        return (True, None)
    error_msg = []
    if not health_data["initialized"]:
        error_msg.append({"message":"Vault is not initialized."})
    if health_data["standby"]:
        error_msg.append({"message":"Vault is in standby mode."})
    if health_data["sealed"]:
        error_msg.append({"message": "Vault is sealed."})
    return (False, error_msg)
================================================
FILE: Zabbix/README.md
================================================
# Zabbix Actions
* [Get Zabbix Handle](https://github.com/unskript/Awesome-CloudOps-Automation/tree/master/Zabbix/legos/zabbix_get_handle/README.md): Get Zabbix Handle
================================================
FILE: Zabbix/__init__.py
================================================
================================================
FILE: Zabbix/legos/__init__.py
================================================
================================================
FILE: Zabbix/legos/zabbix_get_handle/README.md
================================================
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: Zabbix/legos/zabbix_get_handle/__init__.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
#
#
================================================
FILE: Zabbix/legos/zabbix_get_handle/zabbix_get_handle.json
================================================
{
"action_title": "Get Zabbix Handle",
"action_description": "Get Zabbix Handle",
"action_type": "LEGO_TYPE_ZABBIX",
"action_entry_function": "zabbix_get_handle",
"action_needs_credential": true,
"action_supports_poll": false,
"action_supports_iteration": false,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_ZABBIX"]
}
================================================
FILE: Zabbix/legos/zabbix_get_handle/zabbix_get_handle.py
================================================
#
# Copyright (c) 2021 unSkript.com
# All rights reserved.
#
from pydantic import BaseModel
class InputSchema(BaseModel):
    # No user-supplied inputs: the Zabbix handle is injected by the framework
    # via task.validate(...).
    pass
def zabbix_get_handle(handle):
    """zabbix_get_handle returns the Zabbix handle.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :rtype: Zabbix Handle
    """
    # Nothing to do beyond echoing back the pre-validated handle.
    zabbix_handle = handle
    return zabbix_handle
================================================
FILE: _config.yml
================================================
remote_theme: pages-themes/minimal@v0.2.0
title: Runbooks.sh
logo: https://i.ibb.co/s6RD5zS/logo-runbooks-4.png
description: Empowering Cloud Automation, Together.
show_downloads: true
plugins:
- jekyll-relative-links
- jekyll-remote-theme
relative_links:
enabled: true
collections: true
include:
- CONTRIBUTING.md
- README.md
- LICENSE.md
- COPYING.md
- CODE_OF_CONDUCT.md
- ISSUE_TEMPLATE.md
- PULL_REQUEST_TEMPLATE.md
# Google Analytics
google_analytics: UA-237883650-1
================================================
FILE: all_modules_test.py
================================================
#!/usr/bin/env python
#
# Copyright (c) 2023 unSkript.com
# All rights reserved.
#
#
import glob
import os
import importlib
def get_py_files() -> list:
    """ get_py_files finds all the python files under each `CONNECTOR/legos`
    directory and returns them as a list. glob.glob() with the wildcard `**`
    searches through all Connectors; `__init__.py` files are filtered out so
    only the actual action implementations remain.

    :rtype: list, the list of python files
    """
    candidates = glob.glob('./**/legos/*/*.py')
    return [path for path in candidates if os.path.basename(path) != '__init__.py']
def test_if_importable(files: list) -> bool:
""" test_if_importable is a function that just does what it says. For the given
python file, it does a equivalent of `from \n",
" \n",
"
\n",
" \n",
"
\n", "
\n",
" CloudOps automation made simpler!\n",
"
\n",
" Explore the docs »\n",
"
\n",
"
\n",
" Visit our blog\n",
" ·\n",
" Report Bug\n",
" ·\n",
" Request Feature\n",
"
\n",
" \n",
"
\n",
" \n",
"
\n", "
\n",
" CloudOps automation made simpler!\n",
"
\n",
" Explore the docs \u00bb\n",
"
\n",
"
\n",
" Visit our blog\n",
" \u00b7\n",
" Report Bug\n",
" \u00b7\n",
" Request Feature\n",
"
\n",
" \n",
"
\n",
" \n",
"
\n", "
\n",
" CloudOps automation made simpler!\n",
"
\n",
" Explore the docs »\n",
"
\n",
"
\n",
" Visit our blog\n",
" ·\n",
" Report Bug\n",
" ·\n",
" Request Feature\n",
"
## See it in Action
You can see this Lego in action following this link [unSkript Live](https://us.app.unskript.io)
================================================
FILE: infra/legos/infra_execute_runbook/__init__.py
================================================
================================================
FILE: infra/legos/infra_execute_runbook/infra_execute_runbook.json
================================================
{
"action_title": "Infra: Execute runbook",
"action_description": "Infra: use this action to execute particular runbook with given input parameters.",
"action_type": "LEGO_TYPE_INFRA",
"action_entry_function": "infra_execute_runbook",
"action_needs_credential": false,
"action_supports_poll": true,
"action_supports_iteration": true,
"action_output_type": "ACTION_OUTPUT_TYPE_STR",
"action_categories": [ "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_INFRA" ]
}
================================================
FILE: infra/legos/infra_execute_runbook/infra_execute_runbook.py
================================================
##
# Copyright (c) 2021 unSkript, Inc
# All rights reserved.
##
import pprint
from typing import Optional
import json
from pydantic import BaseModel, Field
class unSkriptCustomType(str):
    """str subclass used as a pydantic (v1) custom type: it validates that the
    value is a string and tags the generated JSON schema with
    `fetch_runbook_list` so the unSkript UI renders a runbook picker."""
    @classmethod
    def __get_validators__(cls):
        # one or more validators may be yielded which will be called in the
        # order to validate the input, each validator will receive as an input
        # the value returned from the previous validator
        yield cls.validate

    @classmethod
    def __modify_schema__(cls, field_schema):
        # __modify_schema__ should mutate the dict it receives in place,
        # the returned value will be ignored
        field_schema.update(
            fetch_runbook_list='true'
        )

    @classmethod
    def validate(cls, v):
        # Reject non-string values; wrap valid strings in this subclass so the
        # custom schema tagging above applies.
        if not isinstance(v, str):
            raise TypeError('string required')
        return cls(f'{v}')

    def __repr__(self):
        return f'{super().__repr__()}'
class InputSchema(BaseModel):
    # ID of the runbook to execute; the custom type makes the UI offer a
    # runbook picker (see unSkriptCustomType above).
    runbook_id: unSkriptCustomType = Field(
        title='Runbook ID',
        description='ID of the runbook'
    )
    # Optional dictionary of input parameters forwarded to the runbook.
    params: Optional[dict] = Field(
        title='Runbook parameters',
        description='Parameters to the runbook as a dictionary.'
    )
def infra_execute_runbook_printer(output):
    """Print the runbook execution status when one is available."""
    if output is None:
        return
    pprint.pprint(f"Runbook execution status: {output}")
def infra_execute_runbook(handle, runbook_id: str, params: dict = None) -> str:
    """execute_runbook executes a particular runbook and returns the
    execution status.

    :type handle: object
    :param handle: Object returned from task.validate(...).

    :type runbook_id: str
    :param runbook_id: ID of the runbook to execute.

    :type params: dict
    :param params: Dictionary of runbook input parameters. Serialized to JSON
        before being sent, so None is transmitted as JSON null.

    :rtype: str Execution status reported by the platform.
    """
    # The former `try: ... except Exception as e: raise e` wrapper was a
    # no-op that only obscured the traceback; let errors propagate directly.
    return handle.execute_runbook(runbook_id, json.dumps(params))
================================================
FILE: infra/legos/infra_workflow_done/README.md
================================================
[
================================================
FILE: templates/legos/__init__.py
================================================
================================================
FILE: templates/legos/lego.json
================================================
{
"action_title": "Your lego title",
"action_description": "Your lego description",
"action_entry_function": "name_of_the_function_implemented",
"action_supports_poll": true,
"action_supports_iteration": true,
"action_categories": [ "CATEGORY_TYPE_CLOUDOPS", "CATEGORY_TYPE_DEVOPS", "CATEGORY_TYPE_SRE","CATEGORY_TYPE_TEMPLATE"]
}
================================================
FILE: templates/legos/lego.py
================================================
================================================
FILE: templates/runbooks/StartHere.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"id": "6e397c81",
"metadata": {
"name": "Start Here",
"title": "Start Here"
},
"source": [
"Search the lego on the search bar on the right, drag and drop the lego to the cell."
]
}
],
"metadata": {
"execution_data": {
"environment_id": "1499f27c-6406-4fbd-bd1b-c6f92800018f",
"environment_name": "Staging",
"execution_id": "",
"inputs_for_searched_lego": "",
"notebook_id": "d4159cb3-6c83-4ba5-a2f7-d23c0777076b.ipynb",
"parameters": null,
"runbook_name": "gcp",
"search_string": "",
"show_tool_tip": true,
"tenant_id": "982dba5f-d9df-48ae-a5bf-ec1fc94d4882",
"tenant_url": "https://tenant-staging.alpha.unskript.io",
"user_email_id": "support+staging@unskript.com",
"workflow_id": "f8ead207-81c0-414a-a15b-76fcdefafe8d"
},
"kernelspec": {
"display_name": "unSkript (Build: 618)",
"name": "python_kubernetes"
},
"language_info": {
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"pygments_lexer": "ipython3"
},
"parameterSchema": {
"properties": {},
"required": [],
"title": "Schema",
"type": "object"
},
"parameterValues": null
},
"nbformat": 4,
"nbformat_minor": 5
}
================================================
FILE: templates/runbooks/gcp.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"id": "6e397c81",
"metadata": {
"name": "Welcome",
"title": "Welcome"
},
"source": [
"Use the below lego to start building your functionality."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7214df50-b385-4093-8147-72fc30ebd671",
"metadata": {
"accessType": "ACCESS_TYPE_UNSPECIFIED",
"actionBashCommand": false,
"actionNeedsCredential": true,
"actionRequiredLinesInCode": [],
"actionSupportsIteration": false,
"actionSupportsPoll": false,
"action_uuid": "aa0e997987a52d5a181f7d2352066443e675cd2c6893ffd8ae18c46dc2dcf8f1",
"createTime": "1970-01-01T00:00:00Z",
"currentVersion": "0.1.0",
"description": "Get GCP Handle",
"id": 92,
"index": 92,
"inputschema": [
{
"properties": {},
"title": "gcp_get_handle",
"type": "object"
}
],
"jupyter": {
"source_hidden": true
},
"legotype": "LEGO_TYPE_GCP",
"name": "Get GCP Handle",
"nouns": [
"gcp",
"handle"
],
"orderProperties": [],
"output": {
"type": ""
},
"tags": [
"gcp_get_handle"
],
"verbs": [
"get"
]
},
"outputs": [],
"source": [
"##\n",
"## Copyright (c) 2021 unSkript, Inc\n",
"## All rights reserved.\n",
"##\n",
"from pydantic import BaseModel\n",
"\n",
"\n",
"from beartype import beartype\n",
"@beartype\n",
"def gcp_get_handle(handle):\n",
" \"\"\"gcp_get_handle returns the GCP handle.\n",
"\n",
" :rtype: GCP Handle.\n",
" \"\"\"\n",
" return handle\n",
"\n",
"\n",
"def unskript_default_printer(output):\n",
" if isinstance(output, (list, tuple)):\n",
" for item in output:\n",
" print(f'item: {item}')\n",
" elif isinstance(output, dict):\n",
" for item in output.items():\n",
" print(f'item: {item}')\n",
" else:\n",
" print(f'Output for {task.name}')\n",
" print(output)\n",
"\n",
"task = Task(Workflow())\n",
"(err, hdl, args) = task.validate(vars=vars())\n",
"if err is None:\n",
" task.execute(gcp_get_handle, lego_printer=unskript_default_printer, hdl=hdl, args=args)"
]
}
],
"metadata": {
"execution_data": {
"environment_id": "1499f27c-6406-4fbd-bd1b-c6f92800018f",
"environment_name": "Staging",
"execution_id": "",
"inputs_for_searched_lego": "",
"notebook_id": "d4159cb3-6c83-4ba5-a2f7-d23c0777076b.ipynb",
"parameters": null,
"runbook_name": "gcp",
"search_string": "",
"show_tool_tip": false,
"tenant_id": "982dba5f-d9df-48ae-a5bf-ec1fc94d4882",
"tenant_url": "https://tenant-staging.alpha.unskript.io",
"user_email_id": "support+staging@unskript.com",
"workflow_id": "f8ead207-81c0-414a-a15b-76fcdefafe8d"
},
"kernelspec": {
"display_name": "unSkript (Build: 618)",
"name": "python_kubernetes"
},
"language_info": {
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"pygments_lexer": "ipython3"
},
"parameterSchema": {
"properties": {},
"required": [],
"title": "Schema",
"type": "object"
},
"parameterValues": null
},
"nbformat": 4,
"nbformat_minor": 5
}
================================================
FILE: templates/runbooks/k8s.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"id": "6e397c81",
"metadata": {},
"source": [
"Use the below lego to start building your functionality."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9cb27a11-95cb-4b20-8d41-59903faa0e52",
"metadata": {
"accessType": "ACCESS_TYPE_UNSPECIFIED",
"actionBashCommand": false,
"actionNeedsCredential": true,
"actionRequiredLinesInCode": [],
"actionSupportsIteration": true,
"actionSupportsPoll": true,
"action_uuid": "ae0b25757f0c6c0ca4b3aaf6feea636e3f193dc354f74823a7becd7d675becdc",
"collapsed": true,
"createTime": "1970-01-01T00:00:00Z",
"currentVersion": "0.1.0",
"description": "Kubectl command in python syntax.",
"id": 26,
"index": 26,
"inputschema": [
{
"properties": {
"kubectl_command": {
"description": "kubectl command eg \"kubectl get pods --all-namespaces\"",
"title": "Kubectl Command",
"type": "string"
}
},
"required": [
"kubectl_command"
],
"title": "k8s_kubectl_command",
"type": "object"
}
],
"jupyter": {
"outputs_hidden": true,
"source_hidden": true
},
"legotype": "LEGO_TYPE_K8S",
"name": "Kubectl in python syntax",
"nouns": [
"command"
],
"orderProperties": [
"kubectl_command"
],
"output": {
"type": ""
},
"tags": [
"k8s_kubectl_command"
],
"verbs": [
"execute"
]
},
"outputs": [],
"source": [
"#\n",
"# Copyright (c) 2021 unSkript.com\n",
"# All rights reserved.\n",
"#\n",
"\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"from beartype import beartype\n",
"@beartype\n",
"def k8s_kubectl_command_printer(output):\n",
" if output is None:\n",
" return\n",
" print(output)\n",
"\n",
"\n",
"@beartype\n",
"def k8s_kubectl_command(handle, kubectl_command: str) -> str:\n",
"\n",
" result = handle.run_native_cmd(kubectl_command)\n",
" if result is None or hasattr(result, \"stderr\") is False or result.stderr is None:\n",
" print(\n",
" f\"Error while executing command ({kubectl_command}): {result.stderr}\")\n",
" return str()\n",
"\n",
" return result.stdout\n",
"\n",
"\n",
"task = Task(Workflow())\n",
"(err, hdl, args) = task.validate(vars=vars())\n",
"if err is None:\n",
" task.execute(k8s_kubectl_command, lego_printer=k8s_kubectl_command_printer, hdl=hdl, args=args)"
]
}
],
"metadata": {
"execution_data": {
"environment_id": "1499f27c-6406-4fbd-bd1b-c6f92800018f",
"environment_name": "Staging",
"execution_id": "",
"inputs_for_searched_lego": "",
"notebook_id": "3413c470-a729-4b66-aeac-a9b362e0da42.ipynb",
"parameters": null,
"runbook_name": "k8s",
"search_string": "",
"show_tool_tip": false,
"tenant_id": "982dba5f-d9df-48ae-a5bf-ec1fc94d4882",
"tenant_url": "https://tenant-staging.alpha.unskript.io",
"user_email_id": "support+staging@unskript.com",
"workflow_id": "87d520c9-1582-43c6-8c56-f90788ef6de6"
},
"kernelspec": {
"display_name": "Python 3.9.6 ('jupyter-elyra')",
"language": "python",
"name": "python3"
},
"language_info": {
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
},
"parameterSchema": {
"properties": {},
"required": [],
"title": "Schema",
"type": "object"
},
"parameterValues": null,
"vscode": {
"interpreter": {
"hash": "abbf80fbfe9c242090d0fbc1079a9f03583a8e7a3457324ed37aa21600e94bd8"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}
================================================
FILE: templates/scheduler.template
================================================
#!/bin/bash
*/30 * * * * sudo -H -u root bash -c "/usr/local/bin/unskript-ctl.sh -rc --type k8s, aws --report"
================================================
FILE: tools/README.md
================================================