Repository: codeaprendiz/learn_devops
Branch: main
Commit: d0312962661f
Files: 2053
Total size: 19.5 MB

Directory structure:
gitextract_25g9uy8m/
├── .gitignore
├── README.md
├── ReadMe_static.md
├── _config.yml
└── home/
    ├── cloud_certifications/
    │   └── aws/
    │       ├── ReadMe.md
    │       └── taskset_aws_cloud_certifications/
    │           └── task_001_aws_certified_solutions_architect_professional/
    │               ├── ReadMe.md
    │               ├── apiGateway.md
    │               ├── applicationDiscoveryService.md
    │               ├── aurora.md
    │               ├── awsControlTower.md
    │               ├── backup.md
    │               ├── batch.md
    │               ├── bestPractices.md
    │               ├── billingAndCostManagement.md
    │               ├── certificateManager.md
    │               ├── cloudAdoptionReadinessTool.md
    │               ├── cloudFormation.md
    │               ├── cloudFront.md
    │               ├── cloudHSM.md
    │               ├── cloudTrail.md
    │               ├── cloudWatchLogs.md
    │               ├── cloudsearch.md
    │               ├── cloudwatch.md
    │               ├── codeBuild.md
    │               ├── codeCommit.md
    │               ├── codeDeploy.md
    │               ├── codePipeline.md
    │               ├── cognito.md
    │               ├── commandLineInterface.md
    │               ├── config.md
    │               ├── connect.md
    │               ├── dataMigrationService.md
    │               ├── developerToolsConsole.md
    │               ├── directConnect.md
    │               ├── dynamodb.md
    │               ├── ec2.md
    │               ├── ec2AutoScaling.md
    │               ├── ecs.md
    │               ├── elasticBeanStalk.md
    │               ├── elasticCache.md
    │               ├── elasticFileSystem.md
    │               ├── elasticLoadBalancing.md
    │               ├── eventBridge.md
    │               ├── guardDuty.md
    │               ├── iam.md
    │               ├── inspector.md
    │               ├── kinesis.md
    │               ├── lambda.md
    │               ├── lex.md
    │               ├── macie.md
    │               ├── mechanicalTurk.md
    │               ├── migrationHub.md
    │               ├── opswork.md
    │               ├── opsworks.md
    │               ├── organizations.md
    │               ├── quickSight.md
    │               ├── rds.md
    │               ├── redShift.md
    │               ├── rekognition.md
    │               ├── resourceAccessManager.md
    │               ├── route53.md
    │               ├── s3.md
    │               ├── sageMaker.md
    │               ├── schemaConversionTool.md
    │               ├── secretsManager.md
    │               ├── securityTokenService.md
    │               ├── serverMigrationService.md
    │               ├── serverlessApplicationModel.md
    │               ├── serviceCatalog.md
    │               ├── shield.md
    │               ├── simpleNotificationService.md
    │               ├── simpleQueueService.md
    │               ├── simpleWorkflowService.md
    │               ├── singleSignOn.md
    │               ├── snowball.md
    │               ├── snowballEdge.md
    │               ├── storageGateway.md
    │               ├── systemManager.md
    │               ├── transcribe.md
    │               ├── vpc.md
    │               ├── waf.md
    │               ├── webIdentifyFederation.md
    │               ├── wellArchitected.md
    │               └── whitepapers.md
    ├── cloud_providers/
    │   ├── aws/
    │   │   ├── ReadMe-static.md
    │   │   ├── ReadMe.md
    │   │   └── taskset_aws_cloud_providers/
    │   │       ├── task_001_kms/
    │   │       │   └── ReadMe.md
    │   │       ├── task_002_monitoring_msk/
    │   │       │   └── ReadMe.md
    │   │       ├── task_003_redirection_using_s3_cloudfront/
    │   │       │   └── ReadMe.md
    │   │       ├── task_004_trigger_codebuild_PR_events__eventbridge__static_branc/
    │   │       │   └── ReadMe.md
    │   │       ├── task_005_trigger_codebuild_PR_events__eventbridge__lambda__dynamic_branches/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── buildspec.yaml
    │   │       │   └── lambda.py
    │   │       ├── task_006_codebuild_codecommit_test_reports_gradle/
    │   │       │   ├── ReadMe-static.md
    │   │       │   ├── ReadMe.md
    │   │       │   └── buildspec.yaml
    │   │       ├── task_007_codebuild_codecommit_test_reports_mvn/
    │   │       │   ├── ReadMe.md
    │   │       │   └── buildspec.yaml
    │   │       ├── task_008_trigger_lambda_from_codecommit_using_event_bridge/
    │   │       │   └── ReadMe.md
    │   │       └── task_009_pass_vars_dynamically_from_codebuild_to_codepipeline/
    │   │           └── ReadMe.md
    │   ├── azure/
    │   │   ├── ReadMe.md
    │   │   └── taskset_azure_cloud_providers/
    │   │       ├── task_001_capture_web_app_logs_with_app_service_diagnostics_logging/
    │   │       │   └── ReadMe.md
    │   │       ├── task_002_devbox/
    │   │       │   └── ReadMe.md
    │   │       ├── task_003_create_a_windows_virtual_machine/
    │   │       │   └── ReadMe.md
    │   │       ├── task_004_connect_to_windows_virtual_machine_via_rdp/
    │   │       │   └── ReadMe.md
    │   │       ├── task_005_create_a_generalized_image/
    │   │       │   └── ReadMe.md
    │   │       ├── task_006_create_a_new_virtual_machine_from_a_managed_image/
    │   │       │   └── ReadMe.md
    │   │       ├── task_007_create_an_image_of_azure_vm_from_az_cli_and_provision_a_new_vm/
    │   │       │   └── ReadMe.md
    │   │       ├── task_008_create_an_azure_virtual_machine/
    │   │       │   └── ReadMe.md
    │   │       ├── task_009_configure_network_access/
    │   │       │   └── ReadMe.md
    │   │       ├── task_010_create_a_storage_blob/
    │   │       │   └── ReadMe.md
    │   │       └── task_011_deploy_a_container_app/
    │   │           └── ReadMe.md
    │   ├── gcp/
    │   │   ├── ReadMe-static.md
    │   │   ├── ReadMe.md
    │   │   └── taskset_gcp_cloud_providers/
    │   │       ├── task_001_create_budget__and_alerts/
    │   │       │   └── ReadMe.md
    │   │       ├── task_001_intro_console_projects_iam_apis/
    │   │       │   └── ReadMe.md
    │   │       ├── task_002_getting_started_with_cloud_market_place/
    │   │       │   └── ReadMe.md
    │   │       ├── task_002_getting_started_with_cloud_market_place__lamp_stack/
    │   │       │   └── ReadMe.md
    │   │       ├── task_003_vpc_networking_and_google_compute_engine/
    │   │       │   └── ReadMe.md
    │   │       ├── task_004_getting_started_with_cloud_storage_and_cloud_sql__php/
    │   │       │   ├── ReadMe.md
    │   │       │   └── index.php
    │   │       ├── task_005_getting_started_with_gke__nginx/
    │   │       │   └── ReadMe.md
    │   │       ├── task_006_hello_cloud_run__node/
    │   │       │   ├── ReadMe.md
    │   │       │   └── hello-world-node/
    │   │       │       ├── Dockerfile
    │   │       │       ├── index.js
    │   │       │       └── package.json
    │   │       ├── task_007_building_a_devops_pipeline__python/
    │   │       │   ├── Dockerfile
    │   │       │   ├── ReadMe.md
    │   │       │   ├── main.py
    │   │       │   ├── requirements.txt
    │   │       │   └── templates/
    │   │       │       ├── index.html
    │   │       │       └── layout.html
    │   │       ├── task_008_deploying_app_to_app_engine_and_gke_and_cloudrun/
    │   │       │   ├── Dockerfile
    │   │       │   ├── ReadMe.md
    │   │       │   ├── app.yaml
    │   │       │   ├── k8s-manifests.yaml
    │   │       │   ├── main.py
    │   │       │   ├── requirements.txt
    │   │       │   └── templates/
    │   │       │       ├── index.html
    │   │       │       └── layout.html
    │   │       ├── task_008_deploying_app_to_app_engine_and_gke_and_cloudrun__python/
    │   │       │   ├── Dockerfile
    │   │       │   ├── ReadMe.md
    │   │       │   ├── app.yaml
    │   │       │   ├── k8s-manifests.yaml
    │   │       │   ├── main.py
    │   │       │   ├── requirements.txt
    │   │       │   └── templates/
    │   │       │       ├── index.html
    │   │       │       └── layout.html
    │   │       ├── task_009_monitoring_applications_in_gcp__python/
    │   │       │   ├── Dockerfile
    │   │       │   ├── ReadMe.md
    │   │       │   ├── app.yaml
    │   │       │   ├── main.py
    │   │       │   ├── requirements.txt
    │   │       │   └── templates/
    │   │       │       ├── index.html
    │   │       │       └── layout.html
    │   │       ├── task_010_alerting_in_google_cloud/
    │   │       │   └── ReadMe.md
    │   │       ├── task_010_alerting_in_google_cloud__python/
    │   │       │   ├── Dockerfile
    │   │       │   ├── ReadMe.md
    │   │       │   ├── app-engine-error-percent-policy.json
    │   │       │   ├── app.yaml
    │   │       │   ├── main.py
    │   │       │   ├── requirements.txt
    │   │       │   └── templates/
    │   │       │       ├── index.html
    │   │       │       └── layout.html
    │   │       ├── task_011_service_monitoring__node/
    │   │       │   └── ReadMe.md
    │   │       ├── task_011_service_monitoring__node_app/
    │   │       │   └── ReadMe.md
    │   │       ├── task_012_monitoring_and_dashboarding_multiple_projects_from_a_single_workspace/
    │   │       │   └── ReadMe.md
    │   │       ├── task_012_monitoring_and_dashboarding_multiple_projects_from_a_single_workspace__nginx/
    │   │       │   └── ReadMe.md
    │   │       ├── task_013_compute_logging_and_monitoring/
    │   │       │   └── ReadMe.md
    │   │       ├── task_014_log_analysis/
    │   │       │   └── ReadMe.md
    │   │       ├── task_014_log_analysis_cloud_run__node/
    │   │       │   └── ReadMe.md
    │   │       ├── task_015_cloud_audit_logs/
    │   │       │   └── ReadMe.md
    │   │       ├── task_015_cloud_storage_audit_logs/
    │   │       │   └── ReadMe.md
    │   │       ├── task_016_analyzing_network_traffic_with_vpc_flow_logs/
    │   │       │   └── ReadMe.md
    │   │       ├── task_017_application_performance_management/
    │   │       │   └── ReadMe.md
    │   │       ├── task_017_application_performance_management__python_and_nodejs/
    │   │       │   └── ReadMe.md
    │   │       ├── task_018_2inst_2buckets_2iam/
    │   │       │   └── ReadMe.md
    │   │       ├── task_019_working_with_cloud_build/
    │   │       │   └── ReadMe.md
    │   │       ├── task_020_deploying_google_kubernetes_engine/
    │   │       │   └── ReadMe.md
    │   │       ├── task_021_creating_google_kubernetes_engine_deployments/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── nginx-canary.yaml
    │   │       │   ├── nginx-deployment.yaml
    │   │       │   ├── nginx-svc-session-affinity.yaml
    │   │       │   └── service-nginx.yaml
    │   │       ├── task_022_configuring_persistent_storage_for_google_kubernetes_engine/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── pod-volume-demo.yaml
    │   │       │   ├── pvc-demo.yaml
    │   │       │   └── statefulset-demo.yaml
    │   │       ├── task_023_anthos_service_mesh_walkthrough/
    │   │       │   └── ReadMe.md
    │   │       ├── task_024_observing_anthos_services/
    │   │       │   └── ReadMe.md
    │   │       ├── task_025_managing_traffic_with_anthos_service_mesh/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── conditional-routing-labels.yaml
    │   │       │   ├── conditional-routing-req-headers.yaml
    │   │       │   ├── fault-injection-aborts.yaml
    │   │       │   ├── fault-injection-delays.yaml
    │   │       │   ├── manifests/
    │   │       │   │   ├── destinationrule--productpage-circuit-breaker.yaml
    │   │       │   │   ├── destinationrule-all.yaml
    │   │       │   │   ├── gateway--bookinfo-gateway-port-80-http.yaml
    │   │       │   │   ├── virtualService-2-or-1-second-delay-to-calls-to-ratings-service.yaml
    │   │       │   │   ├── virtualservice--all-to-v1.yaml
    │   │       │   │   ├── virtualservice--bookinfo-based-on-exact-and-prefix-uri-match.yaml
    │   │       │   │   ├── virtualservice--half-second-timeout-for-calls-to-reviews.yaml
    │   │       │   │   ├── virtualservice--productpage-per-try-timeout-2s.yaml
    │   │       │   │   ├── virtualservice--reviews-v2-based-on-header.yaml
    │   │       │   │   ├── virtualservice--reviews-v3.yaml
    │   │       │   │   ├── virtualservice--route-req-to-v2-reviews.yaml
    │   │       │   │   └── virtualservice-reviews-50-v3-and-50-v1.yaml
    │   │       │   ├── retry.yaml
    │   │       │   ├── timeouts.yaml
    │   │       │   └── traffic-splitting.yaml
    │   │       ├── task_026_securing_traffic_through_anthos_service_mesh/
    │   │       │   ├── PeerAuthentication--mesh-wide-mtls.yaml
    │   │       │   ├── PeerAuthentication--restricted-mtls.yaml
    │   │       │   ├── ReadMe.md
    │   │       │   └── manifests/
    │   │       │       ├── AuthorizationPolicy--require-jwt-v2.yaml
    │   │       │       ├── AuthorizationPolicy--require-jwt.yaml
    │   │       │       ├── RequestAuthentication--jwt-example.yaml
    │   │       │       ├── httpbin.yaml
    │   │       │       └── sleep.yaml
    │   │       ├── task_027_cloud_source_repositories_overview/
    │   │       │   └── ReadMe.md
    │   │       ├── task_028_managing_deployments_using_kubernetes_engine/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── cleanup.sh
    │   │       │   ├── deployments/
    │   │       │   │   ├── auth.yaml
    │   │       │   │   ├── frontend.yaml
    │   │       │   │   ├── hello-canary.yaml
    │   │       │   │   ├── hello-green.yaml
    │   │       │   │   └── hello.yaml
    │   │       │   ├── nginx/
    │   │       │   │   ├── frontend.conf
    │   │       │   │   └── proxy.conf
    │   │       │   ├── pods/
    │   │       │   │   ├── healthy-monolith.yaml
    │   │       │   │   ├── monolith.yaml
    │   │       │   │   └── secure-monolith.yaml
    │   │       │   └── services/
    │   │       │       ├── auth.yaml
    │   │       │       ├── frontend.yaml
    │   │       │       ├── hello-blue.yaml
    │   │       │       ├── hello-green.yaml
    │   │       │       ├── hello.yaml
    │   │       │       └── monolith.yaml
    │   │       ├── task_029_trouble_shooting_workloads_on_gke_for_sre/
    │   │       │   └── ReadMe.md
    │   │       ├── task_030_minimal_nodejs_app_dockerize_google_artifact_registry/
    │   │       │   ├── ReadMe.md
    │   │       │   └── test/
    │   │       │       ├── Dockerfile
    │   │       │       └── app.js
    │   │       ├── task_031_hello_node_kubernetes__node/
    │   │       │   ├── Dockerfile
    │   │       │   ├── ReadMe.md
    │   │       │   └── server.js
    │   │       ├── task_032_setting_up_jenkins_on_kubernetes_engine/
    │   │       │   ├── ReadMe.md
    │   │       │   └── values.yaml
    │   │       └── task_033_continuous_delivery_with_jenkins_in_kubernetes_engine/
    │   │           ├── Jenkinsfile
    │   │           ├── ReadMe.md
    │   │           └── values.yaml
    │   └── oci/
    │       ├── ReadMe.md
    │       └── taskset_oci_cloud_providers/
    │           ├── task_000_set_up_oci_cli/
    │           │   └── ReadMe.md
    │           ├── task_001_oci_cli_commands/
    │           │   └── ReadMe.md
    │           └── task_002_create_k8s_quick_create/
    │               └── ReadMe.md
    ├── containers/
    │   ├── docker/
    │   │   ├── ReadMe.md
    │   │   └── taskset_docker_containers/
    │   │       ├── task_000_commands/
    │   │       │   └── ReadMe.md
    │   │       ├── task_001_docker_overview/
    │   │       │   └── ReadMe.md
    │   │       ├── task_002_docker_run__detached_vs_foreground/
    │   │       │   └── ReadMe.md
    │   │       ├── task_003_getting_started/
    │   │       │   └── ReadMe.md
    │   │       ├── task_004_docker_run__pid_setting_and_choosing_image_with_tag/
    │   │       │   └── ReadMe.md
    │   │       ├── task_005_docker_run__assign_name_and_allocate_pseudo_tty/
    │   │       │   └── ReadMe.md
    │   │       ├── task_006_docker_run__expose_port_and_pull_policy_and_environment_vars/
    │   │       │   └── ReadMe.md
    │   │       ├── task_006_run_commands_in_container/
    │   │       │   └── ReadMe.md
    │   │       ├── task_007_docker_run__full_container_capabilities_and_set_working_dir_and_volume_mounts/
    │   │       │   └── ReadMe.md
    │   │       ├── task_008_docker_run__hostsfile_ulimit_mem_limit/
    │   │       │   └── ReadMe.md
    │   │       ├── task_008_docker_run__metadata_and_network_and_attach_to_stdout/
    │   │       │   └── ReadMe.md
    │   │       ├── task_009_docker_attach/
    │   │       │   └── ReadMe.md
    │   │       ├── task_010_docker_build/
    │   │       │   ├── Dockerfile
    │   │       │   ├── ReadMe.md
    │   │       │   └── index.html
    │   │       ├── task_011_mongo/
    │   │       │   └── ReadMe.md
    │   │       ├── task_012_postgres/
    │   │       │   └── ReadMe.md
    │   │       ├── task_013_prometheus_blackbox_exporter/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── blackbox.yml
    │   │       │   └── prometheus.yml
    │   │       ├── task_014_elastic_search/
    │   │       │   └── ReadMe.md
    │   │       ├── task_015_elastic_search_bkp_restore/
    │   │       │   ├── Dockerfile
    │   │       │   ├── ReadMe.md
    │   │       │   ├── register.json
    │   │       │   ├── restoresnapshot.json
    │   │       │   └── snapshotsetting.json
    │   │       ├── task_016_elastic_search_backup_restore_sample_data/
    │   │       │   ├── Dockerfile
    │   │       │   ├── ReadMe.md
    │   │       │   ├── createdata.json
    │   │       │   ├── register.json
    │   │       │   ├── restoresnapshot.json
    │   │       │   └── snapshotsetting.json
    │   │       ├── task_017_mem_and_cpu_limit_container/
    │   │       │   └── ReadMe.md
    │   │       ├── task_018_mysql/
    │   │       │   └── ReadMe.md
    │   │       ├── task_019_nginx_https_domain_test/
    │   │       │   └── ReadMe.md
    │   │       ├── task_020_docker_commit/
    │   │       │   └── ReadMe.md
    │   │       ├── task_021_kibana/
    │   │       │   └── ReadMe.md
    │   │       ├── task_022_sample_app/
    │   │       │   ├── ReadMe.md
    │   │       │   └── app/
    │   │       │       ├── Dockerfile
    │   │       │       ├── package.json
    │   │       │       ├── spec/
    │   │       │       │   ├── persistence/
    │   │       │       │   │   └── sqlite.spec.js
    │   │       │       │   └── routes/
    │   │       │       │       ├── addItem.spec.js
    │   │       │       │       ├── deleteItem.spec.js
    │   │       │       │       ├── getItems.spec.js
    │   │       │       │       └── updateItem.spec.js
    │   │       │       └── src/
    │   │       │           ├── index.js
    │   │       │           ├── persistence/
    │   │       │           │   ├── index.js
    │   │       │           │   ├── mysql.js
    │   │       │           │   └── sqlite.js
    │   │       │           ├── routes/
    │   │       │           │   ├── addItem.js
    │   │       │           │   ├── deleteItem.js
    │   │       │           │   ├── getItems.js
    │   │       │           │   └── updateItem.js
    │   │       │           └── static/
    │   │       │               ├── css/
    │   │       │               │   ├── font-awesome/
    │   │       │               │   │   ├── fa-brands-400.svg#fontawesome
    │   │       │               │   │   ├── fa-regular-400.svg#fontawesome
    │   │       │               │   │   └── fa-solid-900.svg#fontawesome
    │   │       │               │   └── styles.css
    │   │       │               ├── index.html
    │   │       │               └── js/
    │   │       │                   ├── app.js
    │   │       │                   └── react-bootstrap.js
    │   │       ├── task_023_update_sample_app/
    │   │       │   ├── ReadMe.md
    │   │       │   └── app/
    │   │       │       ├── Dockerfile
    │   │       │       ├── package.json
    │   │       │       ├── spec/
    │   │       │       │   ├── persistence/
    │   │       │       │   │   └── sqlite.spec.js
    │   │       │       │   └── routes/
    │   │       │       │       ├── addItem.spec.js
    │   │       │       │       ├── deleteItem.spec.js
    │   │       │       │       ├── getItems.spec.js
    │   │       │       │       └── updateItem.spec.js
    │   │       │       └── src/
    │   │       │           ├── index.js
    │   │       │           ├── persistence/
    │   │       │           │   ├── index.js
    │   │       │           │   ├── mysql.js
    │   │       │           │   └── sqlite.js
    │   │       │           ├── routes/
    │   │       │           │   ├── addItem.js
    │   │       │           │   ├── deleteItem.js
    │   │       │           │   ├── getItems.js
    │   │       │           │   └── updateItem.js
    │   │       │           └── static/
    │   │       │               ├── css/
    │   │       │               │   ├── font-awesome/
    │   │       │               │   │   ├── fa-brands-400.svg#fontawesome
    │   │       │               │   │   ├── fa-regular-400.svg#fontawesome
    │   │       │               │   │   └── fa-solid-900.svg#fontawesome
    │   │       │               │   └── styles.css
    │   │       │               ├── index.html
    │   │       │               └── js/
    │   │       │                   ├── app.js
    │   │       │                   └── react-bootstrap.js
    │   │       ├── task_024_sample_app_persist_db/
    │   │       │   ├── ReadMe.md
    │   │       │   └── app/
    │   │       │       ├── Dockerfile
    │   │       │       ├── package.json
    │   │       │       ├── spec/
    │   │       │       │   ├── persistence/
    │   │       │       │   │   └── sqlite.spec.js
    │   │       │       │   └── routes/
    │   │       │       │       ├── addItem.spec.js
    │   │       │       │       ├── deleteItem.spec.js
    │   │       │       │       ├── getItems.spec.js
    │   │       │       │       └── updateItem.spec.js
    │   │       │       └── src/
    │   │       │           ├── index.js
    │   │       │           ├── persistence/
    │   │       │           │   ├── index.js
    │   │       │           │   ├── mysql.js
    │   │       │           │   └── sqlite.js
    │   │       │           ├── routes/
    │   │       │           │   ├── addItem.js
    │   │       │           │   ├── deleteItem.js
    │   │       │           │   ├── getItems.js
    │   │       │           │   └── updateItem.js
    │   │       │           └── static/
    │   │       │               ├── css/
    │   │       │               │   ├── font-awesome/
    │   │       │               │   │   ├── fa-brands-400.svg#fontawesome
    │   │       │               │   │   ├── fa-regular-400.svg#fontawesome
    │   │       │               │   │   └── fa-solid-900.svg#fontawesome
    │   │       │               │   └── styles.css
    │   │       │               ├── index.html
    │   │       │               └── js/
    │   │       │                   ├── app.js
    │   │       │                   └── react-bootstrap.js
    │   │       ├── task_025_sonarqube__keycloak__saml/
    │   │       │   ├── .gitignore
    │   │       │   └── ReadMe.md
    │   │       ├── task_026_mssql/
    │   │       │   └── ReadMe.md
    │   │       ├── task_027_docker_openvpn/
    │   │       │   ├── Readme.md
    │   │       │   └── setup_vpn.sh
    │   │       ├── task_028_docker_engine__networking__overview/
    │   │       │   └── ReadMe.md
    │   │       ├── task_029_docker_engine__networking__bridge_network_tutorial/
    │   │       │   └── ReadMe.md
    │   │       └── task_030_docker_engine__networking__host_networking_tutorial/
    │   │           └── ReadMe.md
    │   ├── docker_compose/
    │   │   ├── ReadMe.md
    │   │   └── taskset_docker_compose_containers/
    │   │       ├── task_001__gcp__traefik_letsEncrypt_tls_challenge/
    │   │       │   ├── ReadMe.md
    │   │       │   └── docker-compose.yaml
    │   │       ├── task_002__gcp__traefik_letsEncrypt_http_challenge/
    │   │       │   ├── ReadMe.md
    │   │       │   └── docker-compose.yaml
    │   │       ├── task_003__gcp__datadog/
    │   │       │   ├── ReadMe.md
    │   │       │   └── docker-compose.yaml
    │   │       ├── task_004__local__elastic_search_backup_restore_local_with_sample_data/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── docker-compose.yml
    │   │       │   ├── elasticsearch.yml
    │   │       │   ├── restoresnapshot.json
    │   │       │   └── snapshotsetting.json
    │   │       ├── task_005__local__elastic_search_kibana/
    │   │       │   ├── ReadMe.md
    │   │       │   └── docker-compose.yml
    │   │       ├── task_006__local__logstash/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── conf/
    │   │       │   │   └── filter.conf
    │   │       │   ├── docker-compose.yml
    │   │       │   ├── log-sample/
    │   │       │   │   └── access.log
    │   │       │   └── logstash.yml
    │   │       ├── task_007__local__prometheus_blackboxexporter_alertmanager/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── alertmanager/
    │   │       │   │   └── configuration.yml
    │   │       │   ├── blackboxexporter/
    │   │       │   │   └── config.yml
    │   │       │   ├── docker-compose.yml
    │   │       │   └── prometheus/
    │   │       │       ├── alert.rules.yml
    │   │       │       └── prometheus.yml
    │   │       ├── task_008__local__mongodb_metricbeat_elasticsearch_kibana/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── docker/
    │   │       │   │   └── metricbeat/
    │   │       │   │       ├── Dockerfile
    │   │       │   │       ├── entrypoint.sh
    │   │       │   │       └── metricbeat.yml
    │   │       │   └── docker-compose.yml
    │   │       ├── task_009__local__natsStreaming_metricbeat_elasticsearch_kibana/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── docker/
    │   │       │   │   └── metricbeat/
    │   │       │   │       ├── Dockerfile
    │   │       │   │       ├── entrypoint.sh
    │   │       │   │       └── metricbeat.yml
    │   │       │   └── docker-compose.yml
    │   │       ├── task_010__local__mysql_metricbeat_elasticsearch_kibana/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── docker/
    │   │       │   │   └── metricbeat/
    │   │       │   │       ├── Dockerfile
    │   │       │   │       ├── entrypoint.sh
    │   │       │   │       └── metricbeat.yml
    │   │       │   └── docker-compose.yml
    │   │       ├── task_011__gcp__nginx_https_domain_test/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── certs/
    │   │       │   │   ├── star_domain.com.key
    │   │       │   │   └── star_domain_com.chained.crt
    │   │       │   ├── docker-compose.yml
    │   │       │   └── nginx.conf
    │   │       ├── task_012__local__caddy_https_domain_test_with_custom_certs/
    │   │       │   ├── Caddyfile
    │   │       │   ├── ReadMe.md
    │   │       │   ├── certs/
    │   │       │   │   ├── star_domain.com.key
    │   │       │   │   └── star_domain_com.chained.crt
    │   │       │   ├── docker-compose.yml
    │   │       │   ├── index.html-renameme-to-html
    │   │       │   └── password-generation.yml
    │   │       ├── task_013__local__caddy_https_acme_and_save_certs/
    │   │       │   ├── Caddyfile
    │   │       │   ├── ReadMe.md
    │   │       │   └── docker-compose.yml
    │   │       ├── task_014__local__reverse_proxy_nginx_home_using_caddy/
    │   │       │   ├── Caddyfile
    │   │       │   ├── ReadMe.md
    │   │       │   ├── certs/
    │   │       │   │   ├── chained-cert.crt
    │   │       │   │   └── private-key.key
    │   │       │   ├── docker-compose.yml
    │   │       │   ├── index.html-rename-me-to-html
    │   │       │   ├── nginx-home/
    │   │       │   │   └── index.html
    │   │       │   └── password-generation.yml
    │   │       ├── task_015__local__mem_and_cpu_limit_nginx_container/
    │   │       │   ├── ReadMe.md
    │   │       │   ├── docker-compose-v2.4.yaml
    │   │       │   └── docker-compose-v3.8.yaml
    │   │       └── task_016__gcp_local__grafana__tempo__prometheus__xk6_client_tracing/
    │   │           ├── ReadMe.md
    │   │           ├── docker-compose.yaml
    │   │           └── shared/
    │   │               ├── grafana-datasources.yaml
    │   │               ├── prometheus.yaml
    │   │               └── tempo.yaml
    │   └── kubernetes/
    │       ├── README.md
    │       ├── concepts/
    │       │   ├── ReadMe.md
    │       │   ├── task_001_blue_green_deployment/
    │       │   │   └── ReadMe.md
    │       │   └── task_002_networking/
    │       │       └── ReadMe.md
    │       └── taskset_kubernetes_containers/
    │           ├── task_001__local__configure_default_CPU_requests_and_limits_for_a_namespace/
    │           │   ├── ReadMe.md
    │           │   ├── limitrange-object.yaml
    │           │   ├── pod-limit-cpu.yaml
    │           │   ├── pod-no-limit-specified.yaml
    │           │   └── pod-request-cpu.yaml
    │           ├── task_002__local__configure_default_memory_requests_and_limits_for_a_namespace/
    │           │   ├── ReadMe.md
    │           │   ├── limitrange-object.yaml
    │           │   ├── pod-limit-memory.yaml
    │           │   ├── pod-no-limit-specified.yaml
    │           │   └── pod-request-memory.yaml
    │           ├── task_003__local__understanding_k8s_port_forward/
    │           │   └── ReadMe.md
    │           ├── task_004__local__expose_svc_via_nodeport/
    │           │   └── ReadMe.md
    │           ├── task_005__local__access_svc_nodeport_via_ingress/
    │           │   ├── ReadMe.md
    │           │   └── ingress.yaml
    │           ├── task_006__local__access_svc_clusterip_via_ingress/
    │           │   ├── ReadMe.md
    │           │   └── ingress.yaml
    │           ├── task_007__local__jenkins_k8s/
    │           │   ├── ReadMe.md
    │           │   ├── jenkins-agent.Dockerfile
    │           │   ├── jenkins-controller.Dockerfile
    │           │   ├── jenkins-k8s-manifests.yaml
    │           │   └── values.yaml
    │           ├── task_008__local__kafka/
    │           │   ├── ReadMe.md
    │           │   ├── kafka-manifests.yaml
    │           │   └── values.yaml
    │           ├── task_009__local__kafdrop/
    │           │   ├── ReadMe.md
    │           │   ├── kafdrop-manifests.yaml
    │           │   └── values.yaml
    │           ├── task_010__aws__deploy_traefik_kops_k8s_helm/
    │           │   ├── ReadMe.md
    │           │   ├── traefik-resources.yaml
    │           │   └── values.yaml
    │           ├── task_011__aws__traefik_kops_whoami/
    │           │   ├── ReadMe.md
    │           │   ├── traefik-resources.yaml
    │           │   ├── values.yaml
    │           │   └── whoami.yaml
    │           ├── task_012__aws__kops_with_traefik_customization/
    │           │   ├── ReadMe.md
    │           │   ├── echo-app/
    │           │   │   ├── dep.yaml
    │           │   │   ├── ingress.yaml
    │           │   │   └── svc.yaml
    │           │   ├── httpbin-app/
    │           │   │   ├── dep.yaml
    │           │   │   ├── ingress.yaml
    │           │   │   └── svc.yaml
    │           │   ├── traefik-custom-image/
    │           │   │   ├── Dockerfile
    │           │   │   ├── middleware/
    │           │   │   │   └── middleware.yaml
    │           │   │   └── traefik-add-trace-id/
    │           │   │       ├── .traefik.yml
    │           │   │       ├── README.md
    │           │   │       ├── go.mod
    │           │   │       ├── go.sum
    │           │   │       ├── rand-utils.go
    │           │   │       ├── trace-id.go
    │           │   │       └── trace-id_test.go
    │           │   └── traefik-helm-chart/
    │           │       ├── .helmignore
    │           │       ├── Chart.yaml
    │           │       ├── Guidelines.md
    │           │       ├── LICENSE
    │           │       ├── README.md
    │           │       ├── crds/
    │           │       │   ├── ingressroute.yaml
    │           │       │   ├── ingressroutetcp.yaml
    │           │       │   ├── ingressrouteudp.yaml
    │           │       │   ├── middlewares.yaml
    │           │       │   ├── middlewarestcp.yaml
    │           │       │   ├── serverstransports.yaml
    │           │       │   ├── tlsoptions.yaml
    │           │       │   ├── tlsstores.yaml
    │           │       │   └── traefikservices.yaml
    │           │       ├── templates/
    │           │       │   ├── _helpers.tpl
    │           │       │   ├── _podtemplate.tpl
    │           │       │   ├── daemonset.yaml
    │           │       │   ├── dashboard-hook-ingressroute.yaml
    │           │       │   ├── deployment.yaml
    │           │       │   ├── gateway.yaml
    │           │       │   ├── gatewayclass.yaml
    │           │       │   ├── hpa.yaml
    │           │       │   ├── ingressclass.yaml
    │           │       │   ├── poddisruptionbudget.yaml
    │           │       │   ├── pvc.yaml
    │           │       │   ├── rbac/
    │           │       │   │   ├── clusterrole.yaml
    │           │       │   │   ├── clusterrolebinding.yaml
    │           │       │   │   ├── podsecuritypolicy.yaml
    │           │       │   │   ├── role.yaml
    │           │       │   │   ├── rolebinding.yaml
    │           │       │   │   └── serviceaccount.yaml
    │           │       │   ├── service.yaml
    │           │       │   └── tlsoption.yaml
    │           │       ├── tests/
    │           │       │   ├── container-config_test.yaml
    │           │       │   ├── daemonset-config_test.yaml
    │           │       │   ├── default-install_test.yaml
    │           │       │   ├── deployment-config_test.yaml
    │           │       │   ├── gateway-config_test.yaml
    │           │       │   ├── gatewayclass-config_test.yaml
    │           │       │   ├── pod-config_test.yaml
    │           │       │   ├── poddisruptionbudget-config_test.yaml
    │           │       │   ├── podsecuritypolicy-config_test.yaml
    │           │       │   ├── ports-config_test.yaml
    │           │       │   ├── rbac-config_test.yaml
    │           │       │   ├── service-config_test.yaml
    │           │       │   └── traefik-config_test.yaml
    │           │       ├── traefik-manifests.yaml
    │           │       └── values.yaml
    │           ├── task_013__aws__oauth2_proxy/
    │           │   ├── ReadMe.md
    │           │   ├── kafdrop-chart/
    │           │   │   ├── .helmignore
    │           │   │   ├── Chart.yaml
    │           │   │   ├── kafdrop-manifests.yaml
    │           │   │   ├── templates/
    │           │   │   │   ├── NOTES.txt
    │           │   │   │   ├── _helpers.tpl
    │           │   │   │   ├── deployment.yaml
    │           │   │   │   ├── ingress.yaml
    │           │   │   │   ├── ingressroute.yaml
    │           │   │   │   ├── service.yaml
    │           │   │   │   └── traefik-middleware.yaml
    │           │   │   └── values.yaml
    │           │   ├── oauth2-proxy/
    │           │   │   ├── .helmignore
    │           │   │   ├── Chart.yaml
    │           │   │   ├── README.md
    │           │   │   ├── charts/
    │           │   │   │   ├── common/
    │           │   │   │   │   ├── .helmignore
    │           │   │   │   │   ├── Chart.yaml
    │           │   │   │   │   ├── README.md
    │           │   │   │   │   ├── templates/
    │           │   │   │   │   │   ├── _affinities.tpl
    │           │   │   │   │   │   ├── _capabilities.tpl
    │           │   │   │   │   │   ├── _errors.tpl
    │           │   │   │   │   │   ├── _images.tpl
    │           │   │   │   │   │   ├── _ingress.tpl
    │           │   │   │   │   │   ├── _labels.tpl
    │           │   │   │   │   │   ├── _names.tpl
    │           │   │   │   │   │   ├── _secrets.tpl
    │           │   │   │   │   │   ├── _storage.tpl
    │           │   │   │   │   │   ├── _tplvalues.tpl
    │           │   │   │   │   │   ├── _utils.tpl
    │           │   │   │   │   │   ├── _warnings.tpl
    │           │   │   │   │   │   └── validations/
    │           │   │   │   │   │       ├── _cassandra.tpl
    │           │   │   │   │   │       ├── _mariadb.tpl
    │           │   │   │   │   │       ├── _mongodb.tpl
    │           │   │   │   │   │       ├── _postgresql.tpl
    │           │   │   │   │   │       ├── _redis.tpl
    │           │   │   │   │   │       └── _validations.tpl
    │           │   │   │   │   └── values.yaml
    │           │   │   │   └── redis/
    │           │   │   │       ├── .helmignore
    │           │   │   │       ├── Chart.yaml
    │           │   │   │       ├── README.md
    │           │   │   │       ├── charts/
    │           │   │   │       │   └── common/
    │           │   │   │       │       ├── .helmignore
    │           │   │   │       │       ├── Chart.yaml
    │           │   │   │       │       ├── README.md
    │           │   │   │       │       ├── templates/
    │           │   │   │       │       │   ├── _affinities.tpl
    │           │   │   │       │       │   ├── _capabilities.tpl
    │           │   │   │       │       │   ├── _errors.tpl
    │           │   │   │       │       │   ├── _images.tpl
    │           │   │   │       │       │   ├── _ingress.tpl
    │           │   │   │       │       │   ├── _labels.tpl
    │           │   │   │       │       │   ├── _names.tpl
    │           │   │   │       │       │   ├── _secrets.tpl
    │           │   │   │       │       │   ├── _storage.tpl
    │           │   │   │       │       │   ├── _tplvalues.tpl
    │           │   │   │       │       │   ├── _utils.tpl
    │           │   │   │       │       │   ├── _warnings.tpl
    │           │   │   │       │       │   └── validations/
    │           │   │   │       │       │       ├── _cassandra.tpl
    │           │   │   │       │       │       ├── _mariadb.tpl
    │           │   │   │       │       │       ├── _mongodb.tpl
    │           │   │   │       │       │       ├── _postgresql.tpl
    │           │   │   │       │       │       ├── _redis.tpl
    │           │   │   │       │       │       └── _validations.tpl
    │           │   │   │       │       └── values.yaml
    │           │   │   │       ├── ci/
    │           │   │   │       │   ├── extra-flags-values.yaml
    │           │   │   │       │   ├── sentinel-values.yaml
    │           │   │   │       │   └── standalone-values.yaml
    │           │   │   │       ├── templates/
    │           │   │   │       │   ├── NOTES.txt
    │           │   │   │       │   ├── _helpers.tpl
    │           │   │   │       │   ├── configmap.yaml
    │           │   │   │       │   ├── extra-list.yaml
    │           │   │   │       │   ├── headless-svc.yaml
    │           │   │   │       │   ├── health-configmap.yaml
    │           │   │   │       │   ├── master/
    │           │   │   │       │   │   ├── psp.yaml
    │           │   │   │       │   │   ├── service.yaml
    │           │   │   │       │   │   └── statefulset.yaml
    │           │   │   │       │   ├── metrics-svc.yaml
    │           │   │   │       │   ├── networkpolicy.yaml
    │           │   │   │       │   ├── pdb.yaml
    │           │   │   │       │   ├── prometheusrule.yaml
    │           │   │   │       │   ├── replicas/
    │           │   │   │       │   │   ├── hpa.yaml
    │           │   │   │       │   │   ├── service.yaml
    │           │   │   │       │   │   └── statefulset.yaml
    │           │   │   │       │   ├── role.yaml
    │           │   │   │       │   ├── rolebinding.yaml
    │           │   │   │       │   ├── scripts-configmap.yaml
    │           │   │   │       │   ├── secret.yaml
    │           │   │   │       │   ├── sentinel/
    │           │   │   │       │   │   ├── service.yaml
    │           │   │   │       │   │   └── statefulset.yaml
    │           │   │   │       │   ├── serviceaccount.yaml
    │           │   │   │       │   ├── servicemonitor.yaml
    │           │   │   │       │   └── tls-secret.yaml
    │           │   │   │       ├── values.schema.json
    │           │   │   │       └── values.yaml
    │           │   │   ├── oauth2-manifests.yaml
    │           │   │   ├── templates/
    │           │   │   │   ├── NOTES.txt
    │           │   │   │   ├── _helpers.tpl
    │           │   │   │   ├── configmap.yaml
    │           │   │   │   ├── deployment.yaml
    │           │   │   │   ├── extra-list.yaml
    │           │   │   │   ├── ingress-route.yaml
    │           │   │   │   ├── ingress.yaml
    │           │   │   │   ├── pdb.yaml
    │           │   │   │   ├── secret-authenticated-emails-file.yaml
    │           │   │   │   ├── secret-google.yaml
    │           │   │   │   ├── secret-htpasswd-file.yaml
    │           │   │   │   ├── secret.yaml
    │           │   │   │   ├── service-account.yaml
    │           │   │   │   ├── service.yaml
    │           │   │   │   └── traefik-middlewares.yaml
    │           │   │   └── values.yaml
    │           │   └── traefik/
    │           │       ├── .helmignore
    │           │       ├── Chart.yaml
    │           │       ├── Guidelines.md
    │           │       ├── LICENSE
    │           │       ├── README.md
    │           │       ├── crds/
    │           │       │   ├── ingressroute.yaml
    │           │       │   ├── ingressroutetcp.yaml
    │           │       │   ├── ingressrouteudp.yaml
    │           │       │   ├── middlewares.yaml
    │           │       │   ├── middlewarestcp.yaml
    │           │       │   ├── serverstransports.yaml
    │           │       │   ├── tlsoptions.yaml
    │           │       │   ├── tlsstores.yaml
    │           │       │   └── traefikservices.yaml
    │           │       ├── templates/
    │           │       │   ├── _helpers.tpl
    │           │       │   ├── _podtemplate.tpl
    │           │       │   ├── daemonset.yaml
    │           │       │   ├── dashboard-hook-ingressroute.yaml
    │           │       │   ├── deployment.yaml
    │           │       │   ├── gateway.yaml
    │           │       │   ├── gatewayclass.yaml
    │           │       │   ├── hpa.yaml
    │           │       │   ├── ingressclass.yaml
    │           │       │   ├── poddisruptionbudget.yaml
    │           │       │   ├── pvc.yaml
    │           │       │   ├── rbac/
    │           │       │   │   ├── clusterrole.yaml
    │           │       │   │   ├── clusterrolebinding.yaml
    │           │       │   │   ├── podsecuritypolicy.yaml
    │           │       │   │   ├── role.yaml
    │           │       │   │   ├── rolebinding.yaml
    │           │       │   │   └── serviceaccount.yaml
    │           │       │   ├── service.yaml
    │           │       │   └── tlsoption.yaml
    │           │       ├── tests/
    │           │       │   ├── container-config_test.yaml
    │           │       │   ├── daemonset-config_test.yaml
    │           │       │   ├── default-install_test.yaml
    │           │       │   ├── deployment-config_test.yaml
    │           │       │   ├── gateway-config_test.yaml
    │           │       │   ├── gatewayclass-config_test.yaml
    │           │       │   ├── pod-config_test.yaml
    │           │       │   ├── poddisruptionbudget-config_test.yaml
    │           │       │   ├── podsecuritypolicy-config_test.yaml
    │           │       │   ├── ports-config_test.yaml
    │           │       │   ├── rbac-config_test.yaml
    │           │       │   ├── service-config_test.yaml
    │           │       │   └── traefik-config_test.yaml
    │           │       └── values.yaml
    │           ├── task_014__aws__traefik_kops_whoami_middleware/
    │           │   ├── ReadMe.md
    │           │   ├── middleware.yaml
    │           │   ├── traefik-resources.yaml
    │           │   ├── values.yaml
    │           │   └── whoami.yaml
    │           ├── task_015__aws__lets_encrypt_kops_cluster/
    │           │   ├── ReadMe.md
    │           │   ├── cluster-issuer.yaml
    │           │   ├── ingress.yaml
    │           │   ├── k8s-resources.yaml
    │           │   └── whoami.yaml
    │           ├── task_016__aws__k8s_cluster_using_kops/
    │           │   └── ReadMe.md
    │           ├── task_017__aws__updating_a_kops_cluster/
    │           │   └── ReadMe.md
    │           ├── task_018_aws__kong_ingress_on_eks/
    │           │   ├── ReadMe.md
    │           │   ├── echo-app/
    │           │   │   ├── dep.yaml
    │           │   │   ├── ingress-with-plugin.yaml
    │           │   │   ├── ingress.yaml
    │           │   │   ├── plugin.yaml
    │           │   │   ├── ratelimitplugin.yaml
    │           │   │   └── svc.yaml
    │           │   ├── global-plugins/
    │           │   │   └── ratelimitplugin.yaml
    │           │   ├── httpbin-app/
    │           │   │   ├── add-response-header-plugin.yaml
    │           │   │   ├── consumer.yaml
    │           │   │   ├── customizedKongIngress.yaml
    │           │   │   ├── dep.yaml
    │           │   │   ├── ingress.yaml
    │           │   │   ├── plugin-svc-http-auth.yaml
    │           │   │   ├── specific-consumer-plugin.yaml
    │           │   │   └── svc.yaml
    │           │   └── kong/
    │           │       └── values.yaml
    │           ├── task_019__gcp__elastic_search/
    │           │   └── ReadMe.md
    │           ├── task_020__gcp__basic_namespace_wide_kubeconfig/
    │           │   ├── README.md
    │           │   ├── admin-run.sh
    │           │   ├── client-run.sh
    │           │   ├── csr.cnf
    │           │   ├── csr.yaml
    │           │   ├── dev-ns.yaml
    │           │   ├── kubeconfig.tpl
    │           │   ├── reset.sh
    │           │   ├── role-binding.yaml
    │           │   ├── role.yaml
    │           │   └── www.yaml
    │           ├── task_021__gcp__intermediate_namespace_wide_kubeconfig/
    │           │   ├── README.md
    │           │   ├── admin-run.sh
    │           │   ├── client-run.sh
    │           │   ├── common-resources/
    │           │   │   ├── csr.cnf-template
    │           │   │   ├── csr.yaml
    │           │   │   ├── kubeconfig.tpl
    │           │   │   ├── role-binding.yaml-template
    │           │   │   ├── role-readonly.yaml-template
    │           │   │   └── role-readwrite.yaml-template
    │           │   ├── dev/
    │           │   │   ├── default/
    │           │   │   │   └── groupQA/
    │           │   │   │       ├── csr.yaml
    │           │   │   │       ├── dave.key
    │           │   │   │       ├── kubeconfig
    │           │   │   │       ├── role-binding.yaml
    │           │   │   │       └── role-readonly.yaml
    │           │   │   └── kube-system/
    │           │   │       └── groupDEV/
    │           │   │           ├── csr.yaml
    │           │   │           ├── dave.key
    │           │   │           ├── kubeconfig
    │           │   │           ├── role-binding.yaml
    │           │   │           └── role-readwrite.yaml
    │           │   ├── reset.sh
    │           │   ├── run-all.sh
    │           │   └── www.yaml
    │           ├── task_022__gcp__k8s_dashboard/
    │           │   ├── 00-namespace.yaml
    │           │   ├── 05-service-account.yaml
    │           │   ├── 10-service.yaml
    │           │   ├── 15-secret.yaml
    │           │   ├── 20-configmap.yaml
    │           │   ├── 25-role.yaml
    │           │   ├── 30-clusterRole.yaml
    │           │   ├── 35-roleBinding.yaml
    │           │   ├── 40-clusterRoleBinding.yaml
    │           │   ├── 45-deployment.yaml
    │           │   ├── 50-service-kubernetes-dashboard.yaml
    │           │   ├── 55-service-dashboard-metrics-scraper.yaml
    │           │   ├── 60-admin-service-account.yaml
    │           │   └── README.md
    │           ├── task_023__gcp__traefik_whoami/
    │           │   ├── 00-resource-crd-definition.yml
    │           │   ├── 05-traefik-rbac.yml
    │           │   ├── 10-service-account.yaml
    │           │   ├── 15-traefik-deployment.yaml
    │           │   ├── 16-traefik.toml
    │           │   ├── 20-traefik-service.yaml
    │           │   ├── 25-whoami-deployment.yaml
    │           │   ├── 30-whoami-service.yaml
    │           │   ├── 35-whoami-ingress-route.yaml
    │           │   └── ReadMe.md
    │           ├── task_024__gcp__traefik_whoami_tomlInConfigMap/
    │           │   ├── 00-resource-crd-definition.yml
    │           │   ├── 05-traefik-rbac.yml
    │           │   ├── 10-service-account.yaml
    │           │   ├── 15-traefik-deployment.yaml
    │           │   ├── 20-traefik-service.yaml
    │           │   ├── 24-traefik-configMap.yaml
    │           │   ├── 25-whoami-deployment.yaml
    │           │   ├── 30-whoami-service.yaml
    │           │   ├── 35-whoami-ingress-route.yaml
    │           │   └── ReadMe.md
    │           ├── task_025__gcp__traefik_whoami_lets_encrypt/
    │           │   ├── 00-resource-crd-definition.yml
    │           │   ├── 05-traefik-rbac.yml
    │           │   ├── 10-service-account.yaml
    │           │   ├── 15-traefik-deployment.yaml
    │           │   ├── 20-traefik-service.yaml
    │           │   ├── 25-whoami-deployment.yaml
    │           │   ├── 30-whoami-service.yaml
    │           │   ├── 35-ingress-route.yaml
    │           │   └── ReadMe.md
    │           ├── task_026__gcp__external_IP_to_access_Application_In_Cluster/
    │           │   ├── ReadMe.md
    │           │   └── service/
    │           │       └── load-balancer-example.yaml
    │           ├── task_027__gcp__intermediate_cluster_wide_kubeconfig/
    │           │   ├── README.md
    │           │   ├── admin-run.sh
    │           │   ├── client-run.sh
    │           │   ├── common-resources/
    │           │   │   ├── clusterRole-binding.yaml-template
    │           │   │   ├── clusterRole-readonly.yaml-template
    │           │   │   ├── clusterRole-readwrite.yaml-template
    │           │   │   ├── csr.cnf-template
    │           │   │   ├── csr.yaml
    │           │   │   └── kubeconfig.tpl
    │           │   ├── dev/
    │           │   │   ├── groupDEV/
    │           │   │   │   ├── clusterRole-binding.yaml
    │           │   │   │   ├── clusterRole-readwrite.yaml
    │           │   │   │   ├── csr.yaml
    │           │   │   │   ├── dave.key
    │           │   │   │   └── kubeconfig
    │           │   │   └── groupQA/
    │           │   │       ├── clusterRole-binding.yaml
    │           │   │       ├── clusterRole-readonly.yaml
    │           │   │       ├── csr.yaml
    │           │   │       ├── dave.key
    │           │   │       └── kubeconfig
    │           │   ├── reset.sh
    │           │   ├── run-all.sh
    │           │   └── www.yaml
    │           ├── task_028__gcp__configuring_datadog/
    │           │   ├── 00-clusterrole.yaml
    │           │   ├── 01-cluster-role.yaml
    │           │   ├── 05-serviceaccount.yaml
    │           │   ├── 06-service-account.yaml
    │           │   ├── 10-clusterrolebinding.yaml
    │           │   ├── 11-cluster-role-binding.yaml
    │           │   ├── 20-datadog-cluster-agent.yaml
    │           │   ├── 25-datadog-agent.yaml
    │           │   ├── 30-service.yaml
    │           │   ├── 35-deployment.yaml
    │           │   └── ReadMe.md
    │           ├── task_029__gcp__configuring_dns_with_static_IPs_k8_using_Ingress/
    │           │   ├── ReadMe.md
    │           │   ├── helloweb-deployment.yaml
    │           │   └── helloweb-ingress.yaml
    │           ├── task_030__gcp__PodSecurityPolicy/
    │           │   ├── ReadMe.md
    │           │   └── example-psp.yaml
    │           ├── task_031__gcp__kube_state_metrics/
    │           │   ├── ReadMe.md
    │           │   ├── cluster-role-binding.yaml
    │           │   ├── cluster-role.yaml
    │           │   ├── deployment.yaml
    │           │   ├── service-account.yaml
    │           │   └── service.yaml
    │           ├── task_032__gcp__traefik_whoami_tls_custom_certs/
    │           │   ├── 00-resource-crd-definition.yml
    │           │   ├── 05-traefik-rbac.yml
    │           │   ├── 10-service-account.yaml
    │           │   ├── 11-traefik-configmap.yaml
    │           │   ├── 12-secret.yaml
    │           │   ├── 15-traefik-deployment.yaml
    │           │   ├── 20-traefik-service.yaml
    │           │   ├── 25-whoami-deployment.yaml
    │           │   ├── 30-whoami-service.yaml
    │           │   ├── 35-whoami-ingress-route.yaml
    │           │   └── ReadMe.md
    │           ├── task_033__gcp__configuring_dns_with_static_IPs_k8_using_Service/
    │           │   ├── ReadMe.md
    │           │   ├── helloweb-deployment.yaml
    │           │   └── helloweb-service.yaml
    │           ├── task_034__gcp__metricbeat/
    │           │   ├── 00-service-account.yaml
    │           │   ├── 04-cluster-role.yaml
    │           │   ├── 08-cluster-role-binding.yaml
    │           │   ├── 12-configmap-metricbeat-deployment.yaml
    │           │   ├── 16-configmap-metricbeat-daemonset.yaml
    │           │   ├── 20-daemonset.yaml
    │           │   ├── 24-deployment.yaml
    │           │   └── ReadMe.md
    │           ├── task_035__gcp__journalbeat/
    │           │   ├── 00-service-account.yaml
    │           │   ├── 02-cluster-role.yaml
    │           │   ├── 03-cluster-role-binding.yaml
    │           │   ├── 04-pod-security-policy.yaml
    │           │   ├── 08-configmap.yaml
    │           │   ├── 12-daemonset.yaml
    │           │   └── ReadMe.md
    │           ├── task_036__gcp_vm__cert_manager_lets_encypt_http_validation/
    │           │   ├── ReadMe.md
    │           │   ├── cert-manager.yaml
    │           │   ├── certificate.yaml
    │           │   ├── cluster-issuer.yaml
    │           │   ├── dep-whoami.yaml
    │           │   ├── ingress.yaml
    │           │   └── service.yaml
    │           ├── task_037__gcp_k8s__cert_manager_lets_encypt_http_validation/
    │           │   ├── ReadMe.md
    │           │   ├── cert-manager.yaml
    │           │   ├── certificate.yaml
    │           │   ├── cluster-issuer.yaml
    │           │   ├── dep-whoami.yaml
    │           │   ├── ingress.yaml
    │           │   ├── service.yaml
    │           │   └── svc.yaml
    │           ├── task_038__local__kind__extramounts_for_pv_and_pvc/
    │           │   ├── ReadMe.md
    │           │   ├── kind_extramounts_config.yaml
    │           │   ├── pod.yaml
    │           │   ├── pv.yaml
    │           │   └── pvc.yaml
    │           ├── task_039__local__kind__prometheus_operator__kube_prometheus_grafana_alertmanager/
    │           │   ├── ReadMe.md
    │           │   └── kind-config.yaml
    │           ├── task_040__local__kind__bitnami_loki___using_39/
    │           │   ├── ReadMe.md
    │           │   ├── log-generator-pod.yaml
    │           │   └── loki.yaml
    │           ├── task_041__gcp_k8s__nginx_ingress/
    │           │   ├── ReadMe.md
    │           │   ├── base/
    │           │   │   ├── app/
    │           │   │   │   ├── dep_whoami.yaml
    │           │   │   │   ├── ingress_whoami.yaml
    │           │   │   │   └── svc_whoami.yaml
    │           │   │   └── ingress-nginx/
    │           │   │       ├── configmap_patch.yaml
    │           │   │       ├── deployment_patch.yaml
    │           │   │       ├── ingressclass_patch.yaml
    │           │   │       ├── kustomization.yaml
    │           │   │       ├── namespace.yaml
    │           │   │       └── service_patch.yaml
    │           │   ├── build/
    │           │   │   └── ingress_nginx_ext_all.yaml
    │           │   └── vendor/
    │           │       └── ingress-nginx/
    │           │           ├── ingress-nginx-ext-vendor.yaml
    │           │           └── kustomization.yaml
    │           ├── task_042__gcp_k8s__cert_manager_dns_validation___using_41/
    │           │   ├── ReadMe.md
    │           │   ├── base/
    │           │   │   ├── app/
    │           │   │   │   ├── dep_echo.yaml
    │           │   │   │   ├── dep_whoami.yaml
    │           │   │   │   ├── ingress_echo.yaml
    │           │   │   │   ├── ingress_whoami.yaml
    │           │   │   │   ├── kustomization.yaml
    │           │   │   │   ├── svc_echo.yaml
    │           │   │   │   └── svc_whoami.yaml
    │           │   │   └── cert-manager/
    │           │   │       ├── certificate-letsEncryptProd.yaml
    │           │   │       ├── certificate-letsEncryptStaging.yaml
    │           │   │       ├── clusterIssuer-letsEncryptProd.yaml
    │           │   │       ├── clusterIssuer-letsEncryptStaging.yaml
    │           │   │       ├── kustomization.yaml
    │           │   │       └── namespace.yaml
    │           │   ├── build/
    │           │   │   ├── app_all.yaml
    │           │   │   └── cert_manager_all.yaml
    │           │   └── vendor/
    │           │       └── cert-manager/
    │           │           ├── cert-manager-vendor.yaml
    │           │           └── kustomization.yaml
    │           ├── task_043_gcp_k8s__codecentric_keycloak__bitnami_postgres___using_41_42/
    │           │   ├── ReadMe.md
    │           │   ├── base/
    │           │   │   ├── keycloakx/
    │           │   │   │   ├── ingress_keycloak.yaml
    │           │   │   │   └── kustomization.yaml
    │           │   │   └── postgresql/
    │           │   │       └── namespace.yaml
    │           │   └── build/
    │           │       └── keycloakx_all.yaml
    │           ├── task_044_gcp_k8s__prometheus_operator__kube_prometheus_grafana_alertmanager___using_41_42_43/
    │           │   ├── ReadMe.md
    │           │   ├── base/
    │           │   │   └── kube_prometheus/
    │           │   │       ├── alertmanager_ingress.yaml
    │           │   │       ├── grafana_ingress.yaml
    │           │   │       ├── kustomization.yaml
    │           │   │       └── prometheus_ingress.yaml
    │           │   ├── build/
    │           │   │   └── kube_prometheus_all.yaml
    │           │   └── vendor/
    │           │       └── kube_prometheus/
    │           │           ├── kustomization.yaml
    │           │           └── manifests/
    │           │               ├── alertmanager-alertmanager.yaml
    │           │               ├── alertmanager-networkPolicy.yaml
    │           │               ├── alertmanager-podDisruptionBudget.yaml
    │           │               ├── alertmanager-prometheusRule.yaml
    │           │               ├── alertmanager-secret.yaml
    │           │               ├── alertmanager-service.yaml
    │           │               ├── alertmanager-serviceAccount.yaml
    │           │               ├── alertmanager-serviceMonitor.yaml
    │           │               ├── blackboxExporter-clusterRole.yaml
    │           │               ├── blackboxExporter-clusterRoleBinding.yaml
    │           │               ├── blackboxExporter-configuration.yaml
    │           │               ├── blackboxExporter-deployment.yaml
    │           │               ├── blackboxExporter-networkPolicy.yaml
    │           │               ├── blackboxExporter-service.yaml
    │           │               ├── blackboxExporter-serviceAccount.yaml
    │           │               ├── blackboxExporter-serviceMonitor.yaml
    │           │               ├── grafana-config.yaml
    │           │               ├── grafana-dashboardDatasources.yaml
    │           │               ├── grafana-dashboardDefinitions.yaml
    │           │               ├── grafana-dashboardSources.yaml
    │           │               ├── grafana-deployment.yaml
    │           │               ├── grafana-networkPolicy.yaml
    │           │               ├── grafana-prometheusRule.yaml
    │           │               ├── grafana-service.yaml
    │           │               ├── grafana-serviceAccount.yaml
    │           │               ├── grafana-serviceMonitor.yaml
    │           │               ├── kubePrometheus-prometheusRule.yaml
    │           │               ├── kubeStateMetrics-clusterRole.yaml
    │           │               ├── kubeStateMetrics-clusterRoleBinding.yaml
    │           │               ├── kubeStateMetrics-deployment.yaml
    │           │               ├── kubeStateMetrics-networkPolicy.yaml
    │           │               ├── kubeStateMetrics-prometheusRule.yaml
    │           │               ├── kubeStateMetrics-service.yaml
    │           │               ├── kubeStateMetrics-serviceAccount.yaml
    │           │               ├── kubeStateMetrics-serviceMonitor.yaml
    │           │               ├── kubernetesControlPlane-prometheusRule.yaml
    │           │               ├── kubernetesControlPlane-serviceMonitorApiserver.yaml
    │           │               ├── kubernetesControlPlane-serviceMonitorCoreDNS.yaml
    │           │               ├── kubernetesControlPlane-serviceMonitorKubeControllerManager.yaml
    │           │               ├── kubernetesControlPlane-serviceMonitorKubeScheduler.yaml
    │           │               ├── kubernetesControlPlane-serviceMonitorKubelet.yaml
    │           │               ├── nodeExporter-clusterRole.yaml
    │           │               ├── nodeExporter-clusterRoleBinding.yaml
    │           │               ├── nodeExporter-daemonset.yaml
    │           │               ├── nodeExporter-networkPolicy.yaml
    │           │               ├── nodeExporter-prometheusRule.yaml
    │           │               ├── nodeExporter-service.yaml
    │           │               ├── nodeExporter-serviceAccount.yaml
    │           │               ├── nodeExporter-serviceMonitor.yaml
    │           │               ├── prometheus-clusterRole.yaml
    │           │               ├── prometheus-clusterRoleBinding.yaml
    │           │               ├── prometheus-networkPolicy.yaml
    │           │               ├── prometheus-podDisruptionBudget.yaml
    │           │               ├── prometheus-prometheus.yaml
    │           │               ├── prometheus-prometheusRule.yaml
    │           │               ├── prometheus-roleBindingConfig.yaml
    │           │               ├── prometheus-roleBindingSpecificNamespaces.yaml
    │           │               ├── prometheus-roleConfig.yaml
    │           │               ├── prometheus-roleSpecificNamespaces.yaml
    │           │               ├── prometheus-service.yaml
    │           │               ├── prometheus-serviceAccount.yaml
    │           │               ├── prometheus-serviceMonitor.yaml
    │           │               ├── prometheusAdapter-apiService.yaml
    │           │               ├── prometheusAdapter-clusterRole.yaml
    │           │               ├── prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml
    │           │               ├── prometheusAdapter-clusterRoleBinding.yaml
    │           │               ├── prometheusAdapter-clusterRoleBindingDelegator.yaml
    │           │               ├── prometheusAdapter-clusterRoleServerResources.yaml
    │           │               ├── prometheusAdapter-configMap.yaml
    │           │               ├── prometheusAdapter-deployment.yaml
    │           │               ├── prometheusAdapter-networkPolicy.yaml
    │           │               ├── prometheusAdapter-podDisruptionBudget.yaml
    │           │               ├── prometheusAdapter-roleBindingAuthReader.yaml
    │           │               ├── prometheusAdapter-service.yaml
    │           │               ├── prometheusAdapter-serviceAccount.yaml
    │           │               ├── prometheusAdapter-serviceMonitor.yaml
    │           │               ├── prometheusOperator-clusterRole.yaml
    │           │               ├── prometheusOperator-clusterRoleBinding.yaml
    │           │               ├── prometheusOperator-deployment.yaml
    │           │               ├── prometheusOperator-networkPolicy.yaml
    │           │               ├── prometheusOperator-prometheusRule.yaml
    │           │               ├── prometheusOperator-service.yaml
    │           │               ├── prometheusOperator-serviceAccount.yaml
    │           │               ├── prometheusOperator-serviceMonitor.yaml
    │           │               └── setup/
    │           │                   ├── 0alertmanagerConfigCustomResourceDefinition.yaml
    │           │                   ├── 0alertmanagerCustomResourceDefinition.yaml
    │           │                   ├── 0podmonitorCustomResourceDefinition.yaml
    │           │                   ├── 0probeCustomResourceDefinition.yaml
    │           │                   ├── 0prometheusCustomResourceDefinition.yaml
    │           │                   ├── 0prometheusagentCustomResourceDefinition.yaml
    │           │                   ├── 0prometheusruleCustomResourceDefinition.yaml
    │           │                   ├── 0scrapeconfigCustomResourceDefinition.yaml
    │           │                   ├── 0servicemonitorCustomResourceDefinition.yaml
    │           │                   ├── 0thanosrulerCustomResourceDefinition.yaml
    │           │                   └── namespace.yaml
    │           ├── task_045__local__kind__extra_port_mappings/
    │           │   ├── ReadMe.md
    │           │   ├── kind_nodeport_config.yaml
    │           │   └── pod.yaml
    │           ├── task_046__local__kind__nodeport_with_port_mapping/
    │           │   ├── ReadMe.md
    │           │   ├── kind_config_node_port.yaml
    │           │   ├── pod.yaml
    │           │   └── svc.yaml
    │           ├── task_047__local__kind__ingress_nginx/
    │           │   ├── ReadMe.md
    │           │   ├── ingress.yaml
    │           │   ├── kind_config_ingress_nginx.yaml
    │           │   ├── pod_bar_app.yaml
    │           │   ├── pod_foo_app.yaml
    │           │   ├── svc_bar.yaml
    │           │   └── svc_foo.yaml
    │           ├── task_048__local__kind__cilium__hubble_ui__nginx_ingress/
    │           │   ├── ReadMe.md
    │           │   ├── ingress.yaml
    │           │   └── kind-config.yaml
    │           ├── task_049__openshift__deploy_sample_backend_app/
    │           │   ├── ReadMe.md
    │           │   └── k8s-qotd-python/
    │           │       ├── deploymentconfig.yaml
    │           │       ├── ingress.yaml
    │           │       ├── quotes-deployment.yaml
    │           │       ├── route.yaml
    │           │       └── service.yaml
    │           ├── task_050__local__kind__headless_svc/
    │           │   ├── ReadMe.md
    │           │   ├── headless-svc.yaml
    │           │   └── nginx-pods.yaml
    │           ├── task_051__local__kind__linkerd/
    │           │   └── ReadMe.md
    │           ├── task_052__local__kind__linkerd_with_custom_certs/
    │           │   ├── ReadMe.md
    │           │   ├── ca.crt
    │           │   ├── ca.key
    │           │   ├── issuer.crt
    │           │   └── issuer.key
    │           ├── task_053__local__kind__linkerd_auto_rotate_control_plane_TLS_credentials/
    │           │   ├── ReadMe.md
    │           │   ├── ca.crt
    │           │   ├── ca.key
    │           │   ├── certificate.yaml
    │           │   └── issuer.yaml
    │           ├── task_054__local__kind__linkerd_auto_rotate_webhook_tls_credentials/
    │           │   ├── ReadMe.md
    │           │   ├── ca.crt
    │           │   ├── ca.key
    │           │   ├── certificate-linkerd-proxy-injector.yaml
    │           │   ├── certificate-linkerd-proxy-validator.yaml
    │           │   ├── certificate-linkerd-sp-validator.yaml
    │           │   ├── certificate-linkerd-tap-injector.yaml
    │           │   ├── certificate-tap.yaml
    │           │   ├── webhook-issuer-viz.yaml
    │           │   └── webhook-issuer.yaml
    │           ├── task_055__aws_eks__gcp_gke___istio_getting_started/
    │           │   ├── ReadMe.md
    │           │   ├── gateway.yaml
    │           │   └── virtualservice.yaml
    │           ├── task_056__aws_eks__gcp_gke__istio__request_routing/
    │           │   ├── ReadMe.md
    │           │   ├── concepts.md
    │           │   ├── destination-rule-all.yaml
    │           │   ├── virtual-service-all-v1.yaml
    │           │   └── virtual-service-reviews-test-v2.yaml
    │           ├── task_057__aws_eks__gcp_gke__istio__fault_injection__http_delay__http_abort/
    │           │   ├── ReadMe.md
    │           │   ├── destination-rule-all.yaml
    │           │   ├── virtual-service-all-v1.yaml
    │           │   ├── virtual-service-ratings-test-abort.yaml
    │           │   ├── virtual-service-ratings-test-delay.yaml
    │           │   └── virtual-service-reviews-test-v2.yaml
    │           ├── task_058__aws_eks__gcp_gke__istio__traffic_shifting/
    │           │   ├── ReadMe.md
    │           │   ├── destination-rule-all.yaml
    │           │   ├── virtual-service-all-v1.yaml
    │           │   ├── virtual-service-reviews-50-v3.yaml
    │           │   └── virtual-service-reviews-v3.yaml
    │           ├── task_059__aws_eks__gcp_gke__istio__request_timeouts/
    │           │   ├── ReadMe.md
    │           │   ├── destination-rule-all.yaml
    │           │   └── virtual-service-all-v1.yaml
    │           ├── task_060__gcp_gke__istio__tcp_traffic_shifting/
    │           │   ├── ReadMe.md
    │           │   ├── destination-rule-all.yaml
    │           │   └── virtual-service-all-v1.yaml
    │           ├── task_061__gcp_gke__istio__circuit_breaking/
    │           │   ├── ReadMe.md
    │           │   └── fortio-deploy.yaml
    │           ├── task_062__gcp_gke__istio__mirroring/
    │           │   ├── ReadMe.md
    │           │   └── fortio-deploy.yaml
    │           ├── task_063__gcp_gke__istio__ingress__ingress_gateways/
    │           │   └── ReadMe.md
    │           ├── task_064__gcp_gke__istio__ingres__secure_gateways__TLS__mTLS/
    │           │   ├── ReadMe.md
    │           │   └── helloworld.yaml
    │           ├── task_065__gcp_gke__ingress_gateway_without_TLS_Termination/
    │           │   ├── ReadMe.md
    │           │   └── nginx.conf
    │           ├── task_066__gcp_gke__ingress_kubernetes_ingress/
    │           │   └── ReadMe.md
    │           └── task_067__gcp_gke__egress__accessing_external_service/
    │               └── ReadMe.md
    ├── databases/
    │   ├── mongo/
    │   │   ├── ReadMe-static.md
    │   │   ├── ReadMe.md
    │   │   └── taskset_mongo_databases/
    │   │       ├── task_001_connecting_to_db/
    │   │       │   └── ReadMe.md
    │   │       ├── task_002_creating_db_inserting_data/
    │   │       │   └── ReadMe.md
    │   │       ├── task_003_mongodump/
    │   │       │   └── ReadMe.md
    │   │       └── task_004_mongorestore/
    │   │           └── ReadMe.md
    │   ├── mssql/
    │   │   ├── ReadMe.md
    │   │   ├── ReadMe_static.md
    │   │   └── taskset_mssql_databases/
    │   │       ├── ReadMe-static.md
    │   │       ├── task_000_docs_and_vscode_setup/
    │   │       │   ├── ReadMe.md
    │   │       │   └── settings.json
    │   │       ├── task_001_connecting_to_db/
    │   │       │   └── ReadMe.md
    │   │       ├── task_002_show_databases/
    │   │       │   └── ReadMe.md
    │   │       ├── task_003_drop_database/
    │   │       │   └── ReadMe.md
    │   │       ├── task_004_get_current_database/
    │   │       │   └── ReadMe.md
    │   │       ├── task_005_create_database/
    │   │       │   └── ReadMe.md
    │   │       ├── task_006_check_version/
    │   │       │   └── ReadMe.md
    │   │       ├── task_007_create_database__if_not_exists__vars_navchar/
    │   │       │   └── ReadMe.md
    │   │       ├── task_008_count_tables_in_database/
    │   │       │   └── ReadMe.md
    │   │       ├── task_009_switch_to_particular_db/
    │   │       │   └── ReadMe.md
    │   │       ├── task_010_create_table__if_not_exists/
    │   │       │   └── ReadMe.md
    │   │       ├── task_011_insert_records__if_not_exists/
    │   │       │   └── ReadMe.md
    │   │       ├── task_012_select_all_from_table/
    │   │       │   └── ReadMe.md
    │   │       ├── task_013_create_credential_and_select_credential/
    │   │       │   └── ReadMe.md
    │   │       ├── task_014_full_backup_to_s3_compatible_object_storage__with_format/
    │   │       │   └── ReadMe.md
    │   │       ├── task_015_get_connection_info__kill/
    │   │       │   └── ReadMe.md
    │   │       ├── task_016_full_backup_to_disk__with_format/
    │   │       │   └── ReadMe.md
    │   │       ├── task_017_show_all_tables_in_specific_database/
    │   │       │   └── ReadMe.md
    │   │       ├── task_018_inspect_backup_to_disk__with_init_and_differential/
    │   │       │   └── ReadMe.md
    │   │       ├── task_019_restore_full_backup__from_disk/
    │   │       │   └── ReadMe.md
    │   │       ├── task_020_full_backup_to_disk__with_init_and_differential/
    │   │       │   └── ReadMe.md
    │   │       ├── task_021_restore_from_full_backup_on_disk__with_init_and_differential/
    │   │       │   └── ReadMe.md
    │   │       ├── task_022_full_base_and_differential_backup_to_s3/
    │   │       │   └── ReadMe.md
    │   │       ├── task_023_restore_full_base_and_differential_backup_to_s3/
    │   │       │   └── ReadMe.md
    │   │       ├── task_024_get_current_date_and_time/
    │   │       │   └── ReadMe.md
    │   │       ├── task_025_licence_and_version/
    │   │       │   └── ReadMe.md
    │   │       ├── task_026_check_default_schema/
    │   │       │   └── ReadMe.md
    │   │       ├── task_027_stored_procedures/
    │   │       │   └── ReadMe.md
    │   │       ├── task_028_create_and_validate_jobs/
    │   │       │   └── ReadMe.md
    │   │       ├── task_029_add_jobstep_and_validate/
    │   │       │   └── ReadMe.md
    │   │       ├── task_030_add_job_schedule_and_validate/
    │   │       │   └── ReadMe.md
    │   │       ├── task_031_attach_schedule_to_job_and_validate/
    │   │       │   └── ReadMe.md
    │   │       ├── task_032_add_job_to_server_and_validate/
    │   │       │   └── ReadMe.md
    │   │       ├── task_033_logical_backup/
    │   │       │   └── ReadMe.md
    │   │       ├── task_034_check_when_tables_in_given_database_were_last_updated/
    │   │       │   └── ReadMe.md
    │   │       ├── task_035_slow_queries/
    │   │       │   └── ReadMe.md
    │   │       └── task_036_monitoring_using_influxdb_telegraf_grafana/
    │   │           ├── ReadMe.md
    │   │           └── docker-compose.yaml
    │   ├── mysql/
    │   │   ├── ReadMe.md
    │   │   ├── ReadMe_static.md
    │   │   └── taskset_mysql_databases/
    │   │       ├── task_001_connecting_to_db/
    │   │       │   └── ReadMe.md
    │   │       ├── task_002_mysqldump/
    │   │       │   └── ReadMe.md
    │   │       ├── task_003_managing_databases/
    │   │       │   └── ReadMe.md
    │   │       ├── task_004_managing_user/
    │   │       │   └── ReadMe.md
    │   │       ├── task_005_set_transaction_isolation_levels/
    │   │       │   └── ReadMe.md
    │   │       └── task_006_managing_tables/
    │   │           └── ReadMe.md
    │   ├── oracle19c/
    │   │   ├── ReadMe.md
    │   │   └── taskset_oracle19c_databases/
    │   │       └── task_000_clients_setup/
    │   │           ├── ReadMe.md
    │   │           └── wallet-unzipped/
    │   │               ├── cwallet.sso
    │   │               ├── ewallet.p12
    │   │               ├── keystore.jks
    │   │               ├── ojdbc.properties
    │   │               ├── sqlnet.ora
    │   │               ├── tnsnames.ora
    │   │               └── truststore.jks
    │   └── postgreSQL/
    │       ├── ReadMe-static.md
    │       ├── ReadMe.md
    │       └── taskset_postgreSQL_databases/
    │           ├── task_001_connecting_to_db/
    │           │   └── ReadMe.md
    │           ├── task_002_managing_tables/
    │           │   └── ReadMe.md
    │           ├── task_003_managing_users/
    │           │   └── ReadMe.md
    │           ├── task_004_managing_database/
    │           │   └── ReadMe.md
    │           ├── task_005_managing_schemas/
    │           │   └── ReadMe.md
    │           ├── task_006_checking_deadlock/
    │           │   └── ReadMe.md
    │           └── task_007_set__show__transaction_isolation_levels/
    │               └── ReadMe.md
    ├── devops_blogs/
    │   └── ReadMe.md
    ├── infrastructure_as_code/
    │   ├── ansible/
    │   │   ├── README.md
    │   │   ├── ReadMe-static.md
    │   │   ├── install-ansible.sh
    │   │   └── taskset_ansible_infrastructure_as_code/
    │   │       ├── task_001_commons/
    │   │       │   └── playbooks/
    │   │       │       ├── ansible.cfg
    │   │       │       ├── inventory.ini
    │   │       │       ├── inventory.yml
    │   │       │       └── playbook-install-commons.yaml
    │   │       ├── task_002_datadog_agent/
    │   │       │   └── playbooks/
    │   │       │       ├── ansible.cfg
    │   │       │       ├── inventory.ini
    │   │       │       ├── inventory.yml
    │   │       │       ├── playbook-install-datadog.yaml
    │   │       │       └── roles/
    │   │       │           └── datadog-agent/
    │   │       │               ├── ReadMe.md
    │   │       │               ├── defaults/
    │   │       │               │   └── main.yml
    │   │       │               ├── handlers/
    │   │       │               │   └── main.yml
    │   │       │               ├── tasks/
    │   │       │               │   ├── agent-linux.yml
    │   │       │               │   ├── agent-win.yml
    │   │       │               │   ├── agent5-linux.yml
    │   │       │               │   ├── integration.yml
    │   │       │               │   ├── main.yml
    │   │       │               │   ├── os-check.yml
    │   │       │               │   ├── parse-version.yml
    │   │       │               │   ├── pkg-debian/
    │   │       │               │   │   ├── install-latest.yml
    │   │       │               │   │   └── install-pinned.yml
    │   │       │               │   ├── pkg-debian.yml
    │   │       │               │   ├── pkg-redhat/
    │   │       │               │   │   ├── install-latest.yml
    │   │       │               │   │   └── install-pinned.yml
    │   │       │               │   ├── pkg-redhat.yml
    │   │       │               │   ├── pkg-suse/
    │   │       │               │   │   ├── install-latest.yml
    │   │       │               │   │   └── install-pinned.yml
    │   │       │               │   ├── pkg-suse.yml
    │   │       │               │   ├── pkg-windows-opts.yml
    │   │       │               │   ├── pkg-windows.yml
    │   │       │               │   ├── set-parse-version.yml
    │   │       │               │   ├── win_agent_latest.yml
    │   │       │               │   └── win_agent_version.yml
    │   │       │               └── templates/
    │   │       │                   ├── checks.yaml.j2
    │   │       │                   ├── datadog.conf.j2
    │   │       │                   ├── datadog.yaml.j2
    │   │       │                   ├── system-probe.yaml.j2
    │   │       │                   └── zypper.repo.j2
    │   │       ├── task_003_user/
    │   │       │   └── playbooks/
    │   │       │       ├── ansible.cfg
    │   │       │       ├── inventory.ini
    │   │       │       ├── inventory.yml
    │   │       │       ├── playbook-create-user.yaml
    │   │       │       └── roles/
    │   │       │           └── user/
    │   │       │               ├── ReadMe.md
    │   │       │               ├── defaults/
    │   │       │               │   └── main.yml
    │   │       │               └── tasks/
    │   │       │                   ├── main.yml
    │   │       │                   └── users.yml
    │   │       ├── task_004_directory/
    │   │       │   └── playbooks/
    │   │       │       ├── ansible.cfg
    │   │       │       ├── inventory.ini
    │   │       │       ├── inventory.yml
    │   │       │       ├── playbook-create-directory.yaml
    │   │       │       └── roles/
    │   │       │           └── user/
    │   │       │               ├── ReadMe.md
    │   │       │               ├── defaults/
    │   │       │               │   └── main.yml
    │   │       │               └── tasks/
    │   │       │                   ├── main.yml
    │   │       │                   └── users.yml
    │   │       ├── task_005_docker/
    │   │       │   └── playbooks/
    │   │       │       ├── ansible.cfg
    │   │       │       ├── docker-compose-install.yml
    │   │       │       ├── docker-drone-ci-setup.yml
    │   │       │       ├── drone-ci.yml
    │   │       │       ├── drone-runner.yml
    │   │       │       ├── inventory.ini
    │   │       │       ├── inventory.yml
    │   │       │       ├── monit-install.yml
    │   │       │       ├── playbook-create-directory.yaml
    │   │       │       ├── playbook-create-user.yaml
    │   │       │       ├── playbook-install-commons.yaml
    │   │       │       ├── playbook-install-datadog.yaml
    │   │       │       ├── playbook-install-docker_docker-compose.yaml
    │   │       │       ├── playbook-install-elastic-search-cluster-docker.yaml
    │   │       │       ├── playbook-install-jenkins-docker.yaml
    │   │       │       ├── playbook-install-kibana-docker.yaml
    │   │       │       ├── playbook-install-monit.yaml
    │   │       │       └── roles/
    │   │       │           └── docker/
    │   │       │               ├── README.md
    │   │       │               ├── defaults/
    │   │       │               │   └── main.yml
    │   │       │               ├── handlers/
    │   │       │               │   └── main.yml
    │   │       │               └── tasks/
    │   │       │                   ├── docker-compose.yml
    │   │       │                   ├── docker-users.yml
    │   │       │                   ├── main.yml
    │   │       │                   ├── setup-Debian.yml
    │   │       │                   └── setup-RedHat.yml
    │   │       ├── task_005_drone_ci/
    │   │       │   └── playbooks/
    │   │       │       ├── ansible.cfg
    │   │       │       ├── docker-drone-ci-setup.yml
    │   │       │       ├── drone-ci.yml
    │   │       │       ├── drone-runner.yml
    │   │       │       ├── inventory.ini
    │   │       │       ├── inventory.yml
    │   │       │       └── roles/
    │   │       │           └── drone-ci/
    │   │       │               ├── defaults/
    │   │       │               │   └── main.yml
    │   │       │               ├── tasks/
    │   │       │               │   └── main.yml
    │   │       │               └── templates/
    │   │       │                   ├── conf/
    │   │       │                   │   ├── docker-compose.yml
    │   │       │                   │   └── server.env
    │   │       │                   └── system/
    │   │       │                       └── drone.service
    │   │       ├── task_006_elastic_search_cluster_docker/
    │   │       │   └── playbooks/
    │   │       │       ├── ansible.cfg
    │   │       │       ├── inventory.ini
    │   │       │       ├── inventory.yml
    │   │       │       └── playbook-install-elastic-search-cluster-docker.yaml
    │   │       ├── task_007_jenkins_docker/
    │   │       │   └── playbooks/
    │   │       │       ├── ansible.cfg
    │   │       │       ├── inventory.ini
    │   │       │       ├── inventory.yml
    │   │       │       ├── playbook-install-jenkins-docker.yaml
    │   │       │       └── roles/
    │   │       │           ├── commons/
    │   │       │           │   ├── ReadMe.md
    │   │       │           │   ├── defaults/
    │   │       │           │   │   └── main.yml
    │   │       │           │   └── tasks/
    │   │       │           │       ├── Debian/
    │   │       │           │       │   ├── install-atom-ide.yml
    │   │       │           │       │   ├── setup-Debian.yml
    │   │       │           │       │   └── setup-my-workstation.yml
    │   │       │           │       └── main.yml
    │   │       │           ├── directory/
    │   │       │           │   ├── ReadMe.md
    │   │       │           │   ├── defaults/
    │   │       │           │   │   └── main.yml
    │   │       │           │   └── tasks/
    │   │       │           │       ├── configure.yml
    │   │       │           │       └── main.yml
    │   │       │           ├── docker/
    │   │       │           │   ├── README.md
    │   │       │           │   ├── defaults/
    │   │       │           │   │   └── main.yml
    │   │       │           │   ├── handlers/
    │   │       │           │   │   └── main.yml
    │   │       │           │   └── tasks/
    │   │       │           │       ├── docker-compose.yml
    │   │       │           │       ├── docker-users.yml
    │   │       │           │       ├── main.yml
    │   │       │           │       ├── setup-Debian.yml
    │   │       │           │       └── setup-RedHat.yml
    │   │       │           ├── jenkins-docker/
    │   │       │           │   ├── defaults/
    │   │       │           │   │   └── main.yml
    │   │       │           │   ├── handlers/
    │   │       │           │   │   └── main.yaml
    │   │       │           │   ├── tasks/
    │   │       │           │   │   ├── install-jenkins.yaml
    │   │       │           │   │   ├── main.yml
    │   │       │           │   │   └── plugins.yml
    │   │       │           │   └── templates/
    │   │       │           │       ├── Caddyfile
    │   │       │           │       ├── docker-compose.yml
    │   │       │           │       ├── install-plugins.groovy
    │   │       │           │       └── plugins.txt
    │   │       │           └── user/
    │   │       │               ├── ReadMe.md
    │   │       │               ├── defaults/
    │   │       │               │   └── main.yml
    │   │       │               └── tasks/
    │   │       │                   ├── main.yml
    │   │       │                   └── users.yml
    │   │       ├── task_008_kibana_docker/
    │   │       │   └── playbooks/
    │   │       │       ├── ansible.cfg
    │   │       │       ├── inventory.ini
    │   │       │       ├── inventory.yml
    │   │       │       ├── playbook-install-kibana-docker.yaml
    │   │       │       └── roles/
    │   │       │           ├── commons/
    │   │       │           │   ├── ReadMe.md
    │   │       │           │   ├── defaults/
    │   │       │           │   │   └── main.yml
    │   │       │           │   └── tasks/
    │   │       │           │       ├── Debian/
    │   │       │           │       │   ├── install-atom-ide.yml
    │   │       │           │       │   ├── setup-Debian.yml
    │   │       │           │       │   └── setup-my-workstation.yml
    │   │       │           │       └── main.yml
    │   │       │           ├── directory/
    │   │       │           │   ├── ReadMe.md
    │   │       │           │   ├── defaults/
    │   │       │           │   │   └── main.yml
    │   │       │           │   └── tasks/
    │   │       │           │       ├── configure.yml
    │   │       │           │       └── main.yml
    │   │       │           ├── docker/
    │   │       │           │   ├── README.md
    │   │       │           │   ├── defaults/
    │   │       │           │   │   └── main.yml
    │   │       │           │   ├── handlers/
    │   │       │           │   │   └── main.yml
    │   │       │           │   └── tasks/
    │   │       │           │       ├── docker-compose.yml
    │   │       │           │       ├── docker-users.yml
    │   │       │           │       ├── main.yml
    │   │       │           │       ├── setup-Debian.yml
    │   │       │           │       └── setup-RedHat.yml
    │   │       │           ├── kibana-docker/
    │   │       │           │   ├── ReadMe.md
    │   │       │           │   ├── defaults/
    │   │       │           │   │   └── main.yaml
    │   │       │           │   ├── handlers/
    │   │       │           │   │   └── main.yaml
    │   │       │           │   ├── tasks/
    │   │       │           │   │   ├── install-kibana.yaml
    │   │       │           │   │   └── main.yaml
    │   │       │           │   └── templates/
    │   │       │           │       ├── docker-compose.yaml
    │   │       │           │       └── kibana.yml
    │   │       │           └── user/
    │   │       │               ├── ReadMe.md
    │   │       │               ├── defaults/
    │   │       │               │   └── main.yml
    │   │       │               └── tasks/
    │   │       │                   ├── main.yml
    │   │       │                   └── users.yml
    │   │       └── task_009_monit/
    │   │           └── playbooks/
    │   │               ├── ansible.cfg
    │   │               ├── inventory.ini
    │   │               ├── inventory.yml
    │   │               ├── monit-install.yml
    │   │               └── roles/
    │   │                   └── monit/
    │   │                       ├── ReadMe.md
    │   │                       ├── defaults/
    │   │                       │   └── main.yml
    │   │                       ├── handlers/
    │   │                       │   └── main.yml
    │   │                       ├── tasks/
    │   │                       │   ├── main.yml
    │   │                       │   ├── os-check.yml
    │   │                       │   ├── pkg-debian/
    │   │                       │   │   ├── install-monit-specific.yml
    │   │                       │   │   └── install-monit.yml
    │   │                       │   └── pkg-redhat/
    │   │                       │       └── install-monit.yaml
    │   │                       └── templates/
    │   │                           └── monit/
    │   │                               ├── conf.d/
    │   │                               │   ├── sshd.conf
    │   │                               │   └── system.conf
    │   │                               ├── monit.id
    │   │                               └── monitrc.conf
    │   └── terraform/
    │       ├── aws/
    │       │   ├── README.md
    │       │   ├── ReadMe-static.md
    │       │   ├── credentials.txt.example
    │       │   └── taskset_aws_terraform_infrastructure_as_code/
    │       │       ├── task_000_initialization_setup/
    │       │       │   └── ReadMe.md
    │       │       ├── task_001_vars_provider_ec2_dataSources/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 02-provider.tf
    │       │       │   ├── 05-instance.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── versions.tf
    │       │       ├── task_002_defaultVPC_sbnt_sg_kp_ec2/
    │       │       │   ├── 00-var.tf
    │       │       │   ├── 05-vpc.tf
    │       │       │   ├── 15-subnet.tf
    │       │       │   ├── 25-security-group.tf
    │       │       │   ├── 30-keypair.tf
    │       │       │   ├── 35-ec2-instance.tf
    │       │       │   └── README.md
    │       │       ├── task_003_defaultVPC_kp_sbnt_sg_ec2_script/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 07-vpc.tf
    │       │       │   ├── 10-key-pair.tf
    │       │       │   ├── 15-subnet.tf
    │       │       │   ├── 20-security-group.tf
    │       │       │   ├── 25-instance.tf
    │       │       │   ├── ReadMe.md
    │       │       │   ├── script.sh
    │       │       │   └── versions.tf
    │       │       ├── task_004_vars_provider_ec2_output/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-instance.tf
    │       │       │   ├── ReadMe.md
    │       │       │   ├── private_ips.txt
    │       │       │   └── versions.tf
    │       │       ├── task_005_vars_provider_ec2_remoteStateInS3/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-instance.tf
    │       │       │   ├── 15-backend.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── versions.tf
    │       │       ├── task_006_defaultVPC_defaultSbnt_modules_kp/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-default_vpc.tf
    │       │       │   ├── 15-modules.tf
    │       │       │   ├── 20-key.tf
    │       │       │   ├── ReadMe.md
    │       │       │   ├── mykey
    │       │       │   ├── mykey.pub
    │       │       │   └── versions.tf
    │       │       ├── task_007_customVPC_igw_sbnt_rt_sg_kp_ec2/
    │       │       │   ├── 00-var.tf
    │       │       │   ├── 05-vpc.tf
    │       │       │   ├── 10-igw.tf
    │       │       │   ├── 15-subnet.tf
    │       │       │   ├── 20-route-tables.tf
    │       │       │   ├── 25-security-group.tf
    │       │       │   ├── 30-keypair.tf
    │       │       │   ├── 35-ec2-instance.tf
    │       │       │   ├── README.md
    │       │       │   └── versions.tf
    │       │       ├── task_008_customVPC_3PriSbnts_3PubSbnts_nat_igw_rt/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 15-vpc.tf
    │       │       │   ├── 20-nat.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── versions.tf
    │       │       ├── task_009_customVPC_3PriSbnts_3PubSbnts_nat_igw_rt_ec2_ebs/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 15-vpc.tf
    │       │       │   ├── 16-securitygroup.tf
    │       │       │   ├── 17-key.tf
    │       │       │   ├── 20-nat.tf
    │       │       │   ├── 25-instance.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── versions.tf
    │       │       ├── task_010_customVPC_3PriSbnts_3PubSbnts_nat_igw_rt_ec2_ebs_withMount/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 15-vpc.tf
    │       │       │   ├── 16-securitygroup.tf
    │       │       │   ├── 17-key.tf
    │       │       │   ├── 20-nat.tf
    │       │       │   ├── 22-cloudinit.tf
    │       │       │   ├── 25-instance.tf
    │       │       │   ├── ReadMe.md
    │       │       │   ├── scripts/
    │       │       │   │   ├── init.cfg
    │       │       │   │   └── volumes.sh
    │       │       │   └── versions.tf
    │       │       ├── task_011_route53/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 250-r53-devopslink_zone.tf
    │       │       │   ├── 30-output.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── versions.tf
    │       │       ├── task_012_rds_vpc_ec2/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 15-vpc.tf
    │       │       │   ├── 17-key.tf
    │       │       │   ├── 18-securitygroup.tf
    │       │       │   ├── 20-instance.tf
    │       │       │   ├── 25-rds.tf
    │       │       │   ├── 30-output.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── versions.tf
    │       │       ├── task_013_IAM/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-iam.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── versions.tf
    │       │       ├── task_014_IAM_roles_s3_upload_to_s3/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-iam.tf
    │       │       │   ├── 15-vpc.tf
    │       │       │   ├── 17-key.tf
    │       │       │   ├── 18-securitygroup.tf
    │       │       │   ├── 20-instance.tf
    │       │       │   ├── 25-s3.tf
    │       │       │   ├── 30-output.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── versions.tf
    │       │       ├── task_015_autoscaling_cloudwatchAlarm_ec2_launchConfiguration/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 15-vpc.tf
    │       │       │   ├── 17-key.tf
    │       │       │   ├── 18-securitygroup.tf
    │       │       │   ├── 20-autoscalingpolicy.tf
    │       │       │   ├── 25-autoscaling.tf
    │       │       │   ├── 30-sns.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── versions.tf
    │       │       ├── task_016_ELB_autoscaling/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 15-vpc.tf
    │       │       │   ├── 17-key.tf
    │       │       │   ├── 18-securitygroup.tf
    │       │       │   ├── 19-elb.tf
    │       │       │   ├── 20-autoscaling.tf
    │       │       │   ├── 30-output.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── versions.tf
    │       │       ├── task_017_Elastic_Beanstalk/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-iam.tf
    │       │       │   ├── 15-vpc.tf
    │       │       │   ├── 17-key.tf
    │       │       │   ├── 18-securitygroup.tf
    │       │       │   ├── 25-rds.tf
    │       │       │   ├── 28-elasticbeanstalk.tf
    │       │       │   ├── 30-output.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── versions.tf
    │       │       ├── task_018_create_ECR_repo/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 15-ecr.tf
    │       │       │   ├── ReadMe.md
    │       │       │   ├── output.tf
    │       │       │   └── versions.tf
    │       │       ├── task_019_ECS/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-iam.tf
    │       │       │   ├── 15-vpc.tf
    │       │       │   ├── 17-key.tf
    │       │       │   ├── 18-securitygroup.tf
    │       │       │   ├── 20-ecr.tf
    │       │       │   ├── 25-ecs.tf
    │       │       │   ├── 28-myapp.tf
    │       │       │   ├── 30-output.tf
    │       │       │   ├── templates/
    │       │       │   │   └── app.json.tpl
    │       │       │   └── versions.tf
    │       │       ├── task_020_s3_iam_using_modules/
    │       │       │   ├── ReadMe.md
    │       │       │   └── s3-bucket.tf
    │       │       ├── task_021_k8s_cluster_typhoon/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 25-cluster.tf
    │       │       │   ├── 30-output.tf
    │       │       │   └── ReadMe.md
    │       │       ├── task_022_route53_ec2/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-instance.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-route53.tf
    │       │       │   ├── 250-r53-devopslink_zone.tf
    │       │       │   ├── 30-output.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── versions.tf
    │       │       ├── task_023_terragrunt_ec2/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-ec2.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── run.sh
    │       │       ├── task_024_certificate_manager/
    │       │       │   └── ReadMe.md
    │       │       ├── task_025_terragrunt_iam_user/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-iam-user.tf
    │       │       │   ├── ReadMe.md
    │       │       │   └── run.sh
    │       │       ├── task_026_terragrunt_s3_bucket/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-s3-bucket.tf
    │       │       │   ├── ReadMe.md
    │       │       │   ├── hello.txt
    │       │       │   └── run.sh
    │       │       ├── task_027_terragrunt_cdn/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-cdn.tf
    │       │       │   ├── ReadMe.md
    │       │       │   ├── index.html-rename-me-to-html
    │       │       │   ├── run.sh
    │       │       │   └── secret.tfvars.example
    │       │       ├── task_029_ec2_jenkins_instance_and_slave_instance/
    │       │       │   ├── 00-vars.tf
    │       │       │   ├── 05-provider.tf
    │       │       │   ├── 10-data.tf
    │       │       │   ├── 101-ec2-jenkins.tf
    │       │       │   ├── 102-ec2-jenkins-slave.tf
    │       │       │   ├── 105-sg.tf
    │       │       │   ├── 15-key-pair.tf
    │       │       │   ├── ReadMe.md
    │       │       │   ├── jenkins.pem.pub
    │       │       │   └── run.sh
    │       │       ├── task_030_creating_eks/
    │       │       │   ├── README.md
    │       │       │   ├── eks-cluster.tf
    │       │       │   ├── kubernetes-dashboard-admin.rbac.yaml
    │       │       │   ├── kubernetes.tf
    │       │       │   ├── outputs.tf
    │       │       │   ├── run.sh
    │       │       │   ├── security-groups.tf
    │       │       │   ├── versions.tf
    │       │       │   └── vpc.tf
    │       │       └── task_031_creating_eks_spot/
    │       │           ├── README.md
    │       │           ├── eks-cluster.tf
    │       │           ├── kubernetes-dashboard-admin.rbac.yaml
    │       │           ├── kubernetes.tf
    │       │           ├── outputs.tf
    │       │           ├── run.sh
    │       │           ├── security-groups.tf
    │       │           ├── versions.tf
    │       │           └── vpc.tf
    │       ├── gcp/
    │       │   ├── ReadMe-auto-v2.md
    │       │   ├── ReadMe.md
    │       │   ├── taskset_gcp_terraform_infrastructure_as_code/
    │       │   │   ├── task_001_vpc_2inst_cloudshell/
    │       │   │   │   ├── ReadMe.md
    │       │   │   │   ├── instance/
    │       │   │   │   │   ├── main.tf
    │       │   │   │   │   └── variables.tf
    │       │   │   │   ├── mynetwork.tf
    │       │   │   │   └── provider.tf
    │       │   │   ├── task_002_1inst_dflt_vpc/
    │       │   │   │   ├── ReadMe.md
    │       │   │   │   └── main.tf
    │       │   │   ├── task_003_vars_and_resource_dependencies/
    │       │   │   │   ├── ReadMe.md
    │       │   │   │   ├── exp.tf
    │       │   │   │   ├── instance.tf
    │       │   │   │   ├── outputs.tf
    │       │   │   │   ├── provider.tf
    │       │   │   │   └── variables.tf
    │       │   │   ├── task_004_creating_remote_backend/
    │       │   │   │   ├── ReadMe.md
    │       │   │   │   └── main.tf
    │       │   │   ├── task_005_deploy_k8s_loadbalancer_service/
    │       │   │   │   ├── ReadMe.md
    │       │   │   │   ├── k8s.tf
    │       │   │   │   ├── main.tf
    │       │   │   │   ├── test.sh
    │       │   │   │   └── versions.tf
    │       │   │   ├── task_006_modular_load_balancing_regional_load_balancer/
    │       │   │   │   ├── ReadMe.md
    │       │   │   │   ├── apply.log
    │       │   │   │   ├── main.tf
    │       │   │   │   ├── outputs.tf
    │       │   │   │   ├── terraform-google-lb/
    │       │   │   │   │   └── examples/
    │       │   │   │   │       └── basic/
    │       │   │   │   │           ├── locals.tf
    │       │   │   │   │           ├── main.tf
    │       │   │   │   │           ├── network.tf
    │       │   │   │   │           ├── outputs.tf
    │       │   │   │   │           ├── provider.tf
    │       │   │   │   │           ├── templates/
    │       │   │   │   │           │   └── gceme.sh.tpl
    │       │   │   │   │           ├── variables.tf
    │       │   │   │   │           └── versions.tf
    │       │   │   │   ├── variables.tf
    │       │   │   │   └── versions.tf
    │       │   │   ├── task_007_cloud_sql/
    │       │   │   │   ├── ReadMe.md
    │       │   │   │   ├── main.tf
    │       │   │   │   ├── outputs.tf
    │       │   │   │   └── variables.tf
    │       │   │   └── task_008_building_a_vpn_between_gcp_and_aws/
    │       │   │       ├── ReadMe.md
    │       │   │       └── autonetdeploy-multicloudvpn/
    │       │   │           ├── aws_set_credentials.sh
│ │ ├── create_instance.sh │ │ │ ├── gcp_set_credentials.sh │ │ │ ├── gcp_set_project.sh │ │ │ ├── get_terraform.sh │ │ │ ├── migrate_sa_roles.sh │ │ │ └── terraform/ │ │ │ ├── aws_compute.tf │ │ │ ├── aws_networking.tf │ │ │ ├── aws_outputs.tf │ │ │ ├── aws_security.tf │ │ │ ├── aws_variables.tf │ │ │ ├── gcp_compute.tf │ │ │ ├── gcp_networking.tf │ │ │ ├── gcp_outputs.tf │ │ │ ├── gcp_security.tf │ │ │ ├── gcp_variables.tf │ │ │ ├── main.tf │ │ │ ├── run_graph.sh │ │ │ └── vm_userdata.sh │ │ └── update-readme.php │ └── oci/ │ ├── ReadMe-static.md │ ├── ReadMe.md │ └── taskset_oci_terraform_infrastructure_as_code/ │ ├── task_000_initialization_setup/ │ │ ├── 00-vars.tf │ │ ├── ReadMe.md │ │ ├── availability-domains.tf │ │ ├── output.tf │ │ └── provider.tf │ ├── task_001_create_a_compartment/ │ │ ├── 00-vars.tf │ │ ├── 01-provider.tf │ │ ├── 02-compartments.tf │ │ ├── 03-outputs.tf │ │ └── ReadMe.md │ ├── task_002_create_vcn/ │ │ ├── 00-vars.tf │ │ ├── 01-provider.tf │ │ ├── 02-vcn-module.tf │ │ ├── 05-private-security-list.tf │ │ ├── 06-public-security-list.tf │ │ ├── 08-private-subnet.tf │ │ ├── 09-public-subnet.tf │ │ ├── 10-outputs.tf │ │ └── ReadMe.md │ └── task_003_create_instance/ │ ├── 00-vars.tf │ ├── 01-provider.tf │ ├── 02-compartments.tf │ ├── 03-outputs.tf │ ├── 04-compute.tf │ ├── ReadMe.md │ └── availability-domains.tf ├── interview/ │ ├── ReadMe-static.md │ ├── ReadMe.md │ └── coding_assignments/ │ ├── ReadMe.md │ └── taskset_coding_assignments_interview/ │ ├── ReadMe.md │ ├── task_001_nodejs_docker_app/ │ │ ├── ReadMe.md │ │ └── node_project/ │ │ ├── .dockerignore │ │ ├── Dockerfile │ │ ├── app.js │ │ ├── package.json │ │ └── views/ │ │ ├── css/ │ │ │ └── styles.css │ │ ├── index.html │ │ └── sharks.html │ ├── task_002_nodejs_mongo_docker/ │ │ ├── ReadMe.md │ │ └── node_project/ │ │ ├── .dockerignore │ │ ├── .gitignore │ │ ├── Dockerfile │ │ ├── app.js │ │ ├── controllers/ │ │ │ └── sharks.js │ │ ├── db.js │ │ ├── models/ │ │ │ └── sharks.js │ │ ├── package.json │ │ ├── routes/ │ │ │ ├── index.js │ │ │ └── sharks.js │ │ └── views/ │ │ ├── css/ │ │ │ └── styles.css │ │ ├── getshark.html │ │ ├── index.html │ │ └── sharks.html │ ├── task_003_nodejs_mongo_docker_compose/ │ │ ├── README.md │ │ └── node_project/ │ │ ├── .dockerignore │ │ ├── .gitignore │ │ ├── Dockerfile │ │ ├── app.js │ │ ├── controllers/ │ │ │ └── sharks.js │ │ ├── db.js │ │ ├── docker-compose.yaml │ │ ├── models/ │ │ │ └── sharks.js │ │ ├── package.json │ │ ├── routes/ │ │ │ ├── index.js │ │ │ └── sharks.js │ │ ├── views/ │ │ │ ├── css/ │ │ │ │ └── styles.css │ │ │ ├── getshark.html │ │ │ ├── index.html │ │ │ └── sharks.html │ │ └── wait-for.sh │ ├── task_004_nodejs_mongo_k8s/ │ │ ├── README.md │ │ ├── db-deployment.yaml │ │ ├── db-service.yaml │ │ ├── dbdata-persistentvolumeclaim.yaml │ │ ├── node_project/ │ │ │ ├── .dockerignore │ │ │ ├── .gitignore │ │ │ ├── Dockerfile │ │ │ ├── app.js │ │ │ ├── controllers/ │ │ │ │ └── sharks.js │ │ │ ├── db.js │ │ │ ├── docker-compose.yaml │ │ │ ├── models/ │ │ │ │ └── sharks.js │ │ │ ├── package.json │ │ │ ├── routes/ │ │ │ │ ├── index.js │ │ │ │ └── sharks.js │ │ │ ├── views/ │ │ │ │ ├── css/ │ │ │ │ │ └── styles.css │ │ │ │ ├── getshark.html │ │ │ │ ├── index.html │ │ │ │ └── sharks.html │ │ │ └── wait-for.sh │ │ ├── nodejs-deployment.yaml │ │ ├── nodejs-env-configmap.yaml │ │ ├── nodejs-service.yaml │ │ └── secret.yaml │ ├── task_005_nodejs_mongo_k8s_helm_scale/ │ │ ├── README.md │ │ ├── mongodb-values.yaml │ │ ├── node_project/ │ │ │ ├── .dockerignore │ │ │ ├── 
.gitignore │ │ │ ├── Dockerfile │ │ │ ├── app.js │ │ │ ├── controllers/ │ │ │ │ └── sharks.js │ │ │ ├── db.js │ │ │ ├── docker-compose.yaml │ │ │ ├── models/ │ │ │ │ └── sharks.js │ │ │ ├── package.json │ │ │ ├── routes/ │ │ │ │ ├── index.js │ │ │ │ └── sharks.js │ │ │ ├── views/ │ │ │ │ ├── css/ │ │ │ │ │ └── styles.css │ │ │ │ ├── getshark.html │ │ │ │ ├── index.html │ │ │ │ └── sharks.html │ │ │ └── wait-for.sh │ │ ├── nodeapp/ │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── templates/ │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── configmap.yaml │ │ │ │ ├── deployment.yaml │ │ │ │ ├── hpa.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── secret.yaml │ │ │ │ ├── service.yaml │ │ │ │ ├── serviceaccount.yaml │ │ │ │ └── tests/ │ │ │ │ └── test-connection.yaml │ │ │ └── values.yaml │ │ ├── resources-app.yaml │ │ ├── resources-db.yaml │ │ └── secret.yaml │ ├── task_006_nginx_docker/ │ │ ├── Dockerfile │ │ ├── ReadMe.md │ │ ├── conf.d/ │ │ │ └── default.conf │ │ └── html/ │ │ └── index.html │ ├── task_007_eks_on_demand/ │ │ ├── README.md │ │ ├── credentials.txt.example │ │ ├── eks-cluster.tf │ │ ├── kubernetes-dashboard-admin.rbac.yaml │ │ ├── kubernetes.tf │ │ ├── outputs.tf │ │ ├── run.sh │ │ ├── security-groups.tf │ │ ├── versions.tf │ │ └── vpc.tf │ ├── task_008_k8s_nginx/ │ │ ├── ReadMe.md │ │ ├── dep.yaml │ │ ├── hpa.yaml │ │ ├── ingress.yaml │ │ └── nginx-svc-nodeport.yaml │ ├── task_009_eks_spot_and_on_demand/ │ │ ├── README.md │ │ ├── credentials.txt.example │ │ ├── eks-cluster.tf │ │ ├── kubernetes-dashboard-admin.rbac.yaml │ │ ├── kubernetes.tf │ │ ├── outputs.tf │ │ ├── run.sh │ │ ├── security-groups.tf │ │ ├── versions.tf │ │ └── vpc.tf │ └── task_010_logging_and_monitoring/ │ ├── ReadMe.md │ ├── nginx.yaml │ └── values.yaml ├── observability/ │ ├── ReadMe.md │ ├── metrics/ │ │ ├── README.md │ │ └── taskset_metrics_observability/ │ │ ├── task_001_host/ │ │ │ └── ReadMe.md │ │ ├── task_002_system/ │ │ │ └── ReadMe.md │ │ └── task_003_kubernetes/ │ │ └── ReadMe.md │ ├── opentelemetry/ │ │ ├── ReadMe.md │ │ └── taskset_opentelemetry_observability/ │ │ ├── task_001_go_dice__traces_and_metrics_to_console/ │ │ │ ├── ReadMe.md │ │ │ ├── go.mod │ │ │ ├── go.sum │ │ │ ├── main.go │ │ │ ├── otel.go │ │ │ └── rolldice.go │ │ └── task_002_go__traces_gRPC_OTLP__jaeger/ │ │ ├── ReadMe.md │ │ ├── go.mod │ │ ├── go.sum │ │ └── main.go │ └── prometheus/ │ ├── ReadMe.md │ └── taskset_prometheus_observability/ │ ├── task_001_getting_started_with_prometheus/ │ │ ├── ReadMe.md │ │ ├── prometheus_1.yml │ │ └── prometheus_2.yml │ ├── task_002_understanding_metric_types/ │ │ └── ReadMe.md │ └── task_003_instrumenting_http_server_in_go/ │ ├── ReadMe.md │ ├── go.mod │ ├── go.sum │ ├── prometheus.yml │ └── server.go ├── os_and_concepts/ │ ├── ReadMe.md │ ├── commands_linux/ │ │ ├── ReadMe.md │ │ └── taskset_commands_linux_os_and_concepts/ │ │ ├── task_001_arp/ │ │ │ └── ReadMe.md │ │ ├── task_002_awk/ │ │ │ └── ReadMe.md │ │ ├── task_003_chage/ │ │ │ └── ReadMe.md │ │ ├── task_004_chmod/ │ │ │ └── ReadMe.md │ │ ├── task_005_chown/ │ │ │ └── ReadMe.md │ │ ├── task_006_cksum/ │ │ │ └── ReadMe.md │ │ ├── task_007_clear/ │ │ │ └── ReadMe.md │ │ ├── task_008_cp/ │ │ │ └── ReadMe.md │ │ ├── task_009_crontab/ │ │ │ └── ReadMe.md │ │ ├── task_010_curl/ │ │ │ └── ReadMe.md │ │ ├── task_011_cut/ │ │ │ └── ReadMe.md │ │ ├── task_012_df/ │ │ │ └── ReadMe.md │ │ ├── task_013_diff/ │ │ │ └── ReadMe.md │ │ ├── task_014_dig/ │ │ │ └── ReadMe.md │ │ ├── task_015_dir/ │ │ │ └── ReadMe.md │ │ ├── task_016_dos2unix/ 
│ │ │ └── ReadMe.md │ │ ├── task_017_du/ │ │ │ └── ReadMe.md │ │ ├── task_018_export/ │ │ │ └── ReadMe.md │ │ ├── task_019_fc/ │ │ │ └── ReadMe.md │ │ ├── task_020_find/ │ │ │ └── ReadMe.md │ │ ├── task_021_firewall_cmd/ │ │ │ └── ReadMe.md │ │ ├── task_022_free/ │ │ │ └── ReadMe.md │ │ ├── task_023_ftp/ │ │ │ └── ReadMe.md │ │ ├── task_024_git/ │ │ │ └── ReadMe.md │ │ ├── task_025_gunzip_gzip/ │ │ │ └── ReadMe.md │ │ ├── task_026_head/ │ │ │ └── ReadMe.md │ │ ├── task_027_history/ │ │ │ └── ReadMe.md │ │ ├── task_028_host/ │ │ │ └── ReadMe.md │ │ ├── task_029_hostname/ │ │ │ └── ReadMe.md │ │ ├── task_030_ifconfig/ │ │ │ └── ReadMe.md │ │ ├── task_031_ip/ │ │ │ └── ReadMe.md │ │ ├── task_032_jar/ │ │ │ └── ReadMe.md │ │ ├── task_033_jobs/ │ │ │ └── ReadMe.md │ │ ├── task_034_jq/ │ │ │ ├── ReadMe.md │ │ │ ├── example1.json │ │ │ ├── example2.json │ │ │ ├── example3.json │ │ │ └── example4.json │ │ ├── task_035_jstack/ │ │ │ └── ReadMe.md │ │ ├── task_036_keytool/ │ │ │ └── ReadMe.md │ │ ├── task_037_ln/ │ │ │ └── ReadMe.md │ │ ├── task_038_lsof/ │ │ │ └── ReadMe.md │ │ ├── task_039_mail/ │ │ │ └── ReadMe.md │ │ ├── task_040_mkdir/ │ │ │ └── ReadMe.md │ │ ├── task_041_mount/ │ │ │ └── ReadMe.md │ │ ├── task_042_nc/ │ │ │ └── ReadMe.md │ │ ├── task_043_netstat/ │ │ │ └── ReadMe.md │ │ ├── task_044_nmap/ │ │ │ └── ReadMe.md │ │ ├── task_045_nohup/ │ │ │ └── ReadMe.md │ │ ├── task_046_nslookup/ │ │ │ └── ReadMe.md │ │ ├── task_047_openssl/ │ │ │ └── ReadMe.md │ │ ├── task_048_passwd/ │ │ │ └── ReadMe.md │ │ ├── task_049_ping/ │ │ │ └── ReadMe.md │ │ ├── task_050_ps/ │ │ │ └── ReadMe.md │ │ ├── task_051_rm/ │ │ │ └── ReadMe.md │ │ ├── task_052_route/ │ │ │ └── ReadMe.md │ │ ├── task_053_rsync/ │ │ │ └── ReadMe.md │ │ ├── task_054_scp/ │ │ │ └── ReadMe.md │ │ ├── task_055_sed/ │ │ │ └── ReadMe.md │ │ ├── task_056_sh/ │ │ │ └── ReadMe.md │ │ ├── task_057_setenforce/ │ │ │ └── ReadMe.md │ │ ├── task_058_sort/ │ │ │ └── ReadMe.md │ │ ├── task_059_ssh/ │ │ │ └── ReadMe.md │ │ ├── task_060_sudo/ │ │ │ └── ReadMe.md │ │ ├── task_061_sysctl/ │ │ │ └── ReadMe.md │ │ ├── task_062_system/ │ │ │ └── ReadMe.md │ │ ├── task_063_systemctl/ │ │ │ └── ReadMe.md │ │ ├── task_064_tail/ │ │ │ └── ReadMe.md │ │ ├── task_065_tar/ │ │ │ └── ReadMe.md │ │ ├── task_066_tee/ │ │ │ └── ReadMe.md │ │ ├── task_067_telnet/ │ │ │ └── ReadMe.md │ │ ├── task_068_gradle/ │ │ │ └── ReadMe.md │ │ ├── task_069_tr/ │ │ │ └── ReadMe.md │ │ ├── task_070_traceroute/ │ │ │ └── ReadMe.md │ │ ├── task_071_top/ │ │ │ └── ReadMe.md │ │ ├── task_072_tty/ │ │ │ └── ReadMe.md │ │ ├── task_073_ulimit/ │ │ │ └── ReadMe.md │ │ ├── task_074_umask/ │ │ │ └── ReadMe.md │ │ ├── task_075_uname/ │ │ │ └── ReadMe.md │ │ ├── task_076_uniq/ │ │ │ └── ReadMe.md │ │ ├── task_077_unzip/ │ │ │ └── ReadMe.md │ │ ├── task_078_uptime/ │ │ │ └── ReadMe.md │ │ ├── task_079_vi/ │ │ │ └── ReadMe.md │ │ ├── task_080_vmstat/ │ │ │ └── ReadMe.md │ │ ├── task_081_wall/ │ │ │ └── ReadMe.md │ │ ├── task_082_wc/ │ │ │ └── ReadMe.md │ │ ├── task_083_wget/ │ │ │ └── ReadMe.md │ │ ├── task_084_whatis/ │ │ │ └── ReadMe.md │ │ ├── task_085_whereis/ │ │ │ └── ReadMe.md │ │ ├── task_086_which/ │ │ │ └── ReadMe.md │ │ ├── task_087_xargs/ │ │ │ └── ReadMe.md │ │ ├── task_088_zcat/ │ │ │ └── ReadMe.md │ │ ├── task_089_zip/ │ │ │ └── ReadMe.md │ │ ├── task_090_ansible/ │ │ │ └── ReadMe.md │ │ ├── task_091_terraform/ │ │ │ └── ReadMe.md │ │ ├── task_092_kubectl/ │ │ │ ├── ReadMe-static.md │ │ │ ├── ReadMe-table.md │ │ │ └── ReadMe.md │ │ ├── task_093_kustomize/ │ │ │ └── ReadMe.md │ 
│ ├── task_094_helm/ │ │ │ └── ReadMe.md │ │ ├── task_095_7z/ │ │ │ └── ReadMe.md │ │ ├── task_096_grep/ │ │ │ └── ReadMe.md │ │ ├── task_097_egrep/ │ │ │ └── ReadMe.md │ │ ├── task_098_growpart/ │ │ │ └── ReadMe.md │ │ ├── task_099_resize2fs/ │ │ │ └── ReadMe.md │ │ ├── task_100_lsblk/ │ │ │ └── ReadMe.md │ │ ├── task_101_kops/ │ │ │ └── ReadMe.md │ │ ├── task_102_mongo/ │ │ │ └── ReadMe.md │ │ ├── task_103_mongorestore/ │ │ │ └── ReadMe.md │ │ ├── task_104_mysql/ │ │ │ └── ReadMe.md │ │ ├── task_105_mysqldump/ │ │ │ └── ReadMe.md │ │ ├── task_106_ssh_keygen/ │ │ │ └── ReadMe.md │ │ ├── task_107_iptables/ │ │ │ └── ReadMe.md │ │ ├── task_108_adduser/ │ │ │ └── ReadMe.md │ │ ├── task_109_chgrp/ │ │ │ └── ReadMe.md │ │ ├── task_110_sqlcmd/ │ │ │ └── ReadMe.md │ │ ├── task_111_tcpdump/ │ │ │ └── ReadMe.md │ │ ├── task_112_step/ │ │ │ └── ReadMe.md │ │ ├── task_113_set/ │ │ │ ├── ReadMe.md │ │ │ ├── called_script.sh │ │ │ ├── caller_script.sh │ │ │ ├── using_set_eu_with_pipefail.sh │ │ │ ├── with_pipefail.sh │ │ │ └── without_pipefail.sh │ │ ├── task_114_mvn/ │ │ │ └── ReadMe.md │ │ ├── task_115_source/ │ │ │ ├── 1.sh │ │ │ ├── 2.sh │ │ │ └── ReadMe.md │ │ ├── task_116_whoami/ │ │ │ └── ReadMe.md │ │ ├── task_117_who/ │ │ │ └── ReadMe.md │ │ ├── task_118_last/ │ │ │ └── ReadMe.md │ │ ├── task_119_az/ │ │ │ └── ReadMe.md │ │ ├── task_120_dpkg/ │ │ │ └── ReadMe.md │ │ ├── task_121_cat/ │ │ │ └── ReadMe.md │ │ ├── task_122_tar/ │ │ │ └── ReadMe.md │ │ ├── task_123_code/ │ │ │ └── ReadMe.md │ │ ├── task_124_unset/ │ │ │ └── ReadMe.md │ │ ├── task_125_ssh_keygen/ │ │ │ └── ReadMe.md │ │ ├── task_126_command/ │ │ │ └── ReadMe.md │ │ ├── task_127_shasum/ │ │ │ └── ReadMe.md │ │ ├── task_128_sh/ │ │ │ └── ReadMe.md │ │ ├── task_129_yq/ │ │ │ └── ReadMe.md │ │ ├── task_130_basename/ │ │ │ └── ReadMe.md │ │ └── task_131_readlink/ │ │ └── ReadMe.md │ ├── commands_windows/ │ │ ├── ReadMe-static.md │ │ ├── ReadMe.md │ │ └── taskset_commands_windows_os_and_concepts/ │ │ ├── task_001_systeminfo/ │ │ │ └── ReadMe.md │ │ ├── task_002_dir/ │ │ │ └── ReadMe.md │ │ ├── task_003_findstr/ │ │ │ └── ReadMe.md │ │ ├── task_004_del/ │ │ │ └── ReadMe.md │ │ ├── task_005_rmdir/ │ │ │ └── ReadMe.md │ │ ├── task_006_echo/ │ │ │ └── ReadMe.md │ │ └── task_007_setx/ │ │ └── ReadMe.md │ └── concepts_linux/ │ ├── ReadMe.md │ └── taskset_concepts_linux_os_and_concepts/ │ ├── task_001_etc_os_release_file/ │ │ └── ReadMe.md │ ├── task_002_etc_passwd/ │ │ └── ReadMe.md │ ├── task_003_etc_group/ │ │ └── ReadMe.md │ ├── task_003_etc_sudoers/ │ │ └── ReadMe.md │ ├── task_004_etc_hosts/ │ │ └── ReadMe.md │ ├── task_005_etc_resolv.conf/ │ │ └── ReadMe.md │ ├── task_006_proc_sys_net_ipv4_ip_forward/ │ │ └── ReadMe.md │ ├── task_007_etc_sysctl.conf/ │ │ └── ReadMe.md │ ├── task_008_special_variables/ │ │ └── ReadMe.md │ ├── task_009_etc_fstab/ │ │ └── ReadMe.md │ ├── task_010_shebang_character/ │ │ └── ReadMe.md │ ├── task_011_redirection/ │ │ └── ReadMe.md │ ├── task_012_eof_here_doc/ │ │ └── ReadMe.md │ └── task_013_referring_documentation/ │ └── ReadMe.md ├── productivity_tools/ │ ├── ReadMe-static.md │ ├── ReadMe.md │ └── taskset_productivity_tools/ │ ├── task_000_setting_up_terminal/ │ │ └── ReadMe.md │ ├── task_001_visualizing_subnets_in_vpc/ │ │ └── ReadMe.md │ ├── task_002_visualizing_k8s_using_dashboard/ │ │ └── ReadMe.md │ ├── task_003_if_IP_belongs_to_CIDR/ │ │ └── ReadMe.md │ ├── task_004_virtual_box/ │ │ └── ReadMe.md │ └── task_005_productivity_shortcuts/ │ ├── ReadMe-bkp.md │ └── ReadMe.md ├── security/ │ └── trivy/ │ 
├── ReadMe.md │ └── taskset_trivy_security/ │ └── task_001_scans/ │ └── ReadMe.md └── web_servers/ └── nginx/ ├── ReadMe.md └── taskset_nginx_web_servers/ ├── task_001_running_nginx/ │ └── running-nginx.md ├── task_002_nginx_docker_compose_hello_world/ │ ├── docker-compose.yaml │ ├── nginx-docker-compose-hello-world.md │ └── src/ │ └── index.html ├── task_003_nginx_conf/ │ ├── ReadMe.md │ ├── conf.d/ │ │ └── default.conf │ └── html/ │ └── index.html └── task_004_return_different_http_codes/ ├── Dockerfile ├── ReadMe.md ├── conf.d/ │ └── default.conf └── html/ └── index.html ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ .idea .aws-credentials .terraform *terraform.tfvars *.tfstate *.tfstate.backup *.terraform.tfstate.lock.info credentials.txt .terraform.lock.hcl *.tfvars *.pem ### docker-compose kitchen .data hsperfdata_root ### Ignore till reviewd to-be-reviewed ### coding-tasks node_modules package-lock.json ### ansible-kitchen .vagrant *.retry .venv # pre and post tasks folders (user defined) pre_tasks/ post_tasks/ .kitchen/ .kitchen.local.yml .bundle ## python __pycache__ ## go pkg bin ## node .env ## python venv ## Temp home/infrastructure-as-code/terraform/oci/task-004-kubernetes-cluster/ home/infrastructure-as-code/terraform/oci/task-005-set-up-resource-discovery/ ## tmp tmp ## Private .private ================================================ FILE: README.md ================================================ # Home > [Auto](https://github.com/codeaprendiz/learn_fullstack/blob/main/home/php/intermediate/taskset_intermediate_php/task_004_createGlobalMarkdownTable/generate-readme.php) generated ReadMe. 
Number of tasks: 473 - [cloud certifications](#cloud_certifications) - [cloud providers](#cloud_providers) - [containers](#containers) - [databases](#databases) - [devops blogs](#devops_blogs) - [infrastructure as code](#infrastructure_as_code) - [interview](#interview) - [observability](#observability) - [os and concepts](#os_and_concepts) - [productivity tools](#productivity_tools) - [security](#security) - [web servers](#web_servers) ## cloud_certifications | aws | |-------------------------------------------| | [Tasks: 1](home/cloud_certifications/aws) | ## cloud_providers | aws | azure | gcp | oci | |--------------------------------------|-----------------------------------------|---------------------------------------|--------------------------------------| | [Tasks: 9](home/cloud_providers/aws) | [Tasks: 11](home/cloud_providers/azure) | [Tasks: 42](home/cloud_providers/gcp) | [Tasks: 3](home/cloud_providers/oci) | ## containers | docker | docker_compose | kubernetes | |-------------------------------------|---------------------------------------------|-----------------------------------------| | [Tasks: 33](home/containers/docker) | [Tasks: 16](home/containers/docker_compose) | [Tasks: 67](home/containers/kubernetes) | ## databases | mongo | mssql | mysql | oracle19c | postgreSQL | |----------------------------------|-----------------------------------|----------------------------------|--------------------------------------|---------------------------------------| | [Tasks: 4](home/databases/mongo) | [Tasks: 38](home/databases/mssql) | [Tasks: 6](home/databases/mysql) | [Tasks: 1](home/databases/oracle19c) | [Tasks: 7](home/databases/postgreSQL) | ## devops_blogs ## infrastructure_as_code | ansible | aws_terraform | gcp_terraform | oci_terraform | |--------------------------------------------------|--------------------------------------------------------|-------------------------------------------------------|-------------------------------------------------------| | [Tasks: 10](home/infrastructure_as_code/ansible) | [Tasks: 31](home/infrastructure_as_code/terraform/aws) | [Tasks: 8](home/infrastructure_as_code/terraform/gcp) | [Tasks: 4](home/infrastructure_as_code/terraform/oci) | ## interview | coding_assignments | |------------------------------------------------| | [Tasks: 11](home/interview/coding_assignments) | ## observability | metrics | opentelemetry | prometheus | |----------------------------------------|----------------------------------------------|-------------------------------------------| | [Tasks: 3](home/observability/metrics) | [Tasks: 2](home/observability/opentelemetry) | [Tasks: 3](home/observability/prometheus) | ## os_and_concepts | commands_linux | commands_windows | concepts_linux | |---------------------------------------------------|---------------------------------------------------|--------------------------------------------------| | [Tasks: 131](home/os_and_concepts/commands_linux) | [Tasks: 7](home/os_and_concepts/commands_windows) | [Tasks: 14](home/os_and_concepts/concepts_linux) | ## productivity_tools | | |-------------------------------------| | [Tasks: 6](home/productivity_tools) | ## security | trivy | |---------------------------------| | [Tasks: 1](home/security/trivy) | ## web_servers | nginx | |------------------------------------| | [Tasks: 4](home/web_servers/nginx) | ================================================ FILE: ReadMe_static.md ================================================ # ToDos - SAST - DAST - IAST - Checkmarx - 
Trivy - SonarQube

================================================
FILE: _config.yml
================================================
theme: jekyll-theme-cayman

================================================
FILE: home/cloud_certifications/aws/ReadMe.md
================================================
# taskset_aws_cloud_certifications

> [Auto](https://github.com/codeaprendiz/learn_fullstack/blob/main/home/php/intermediate/taskset_intermediate_php/task_004_createGlobalMarkdownTable/generate-readme.php) generated ReadMe.

Number of tasks: 1

| Task | Description |
|----------|------------------------------------------------------------------------------------------------------------------------------------------------------|
| task_001 | [task_001_aws_certified_solutions_architect_professional](taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional) |

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/ReadMe.md
================================================
[Cheat Sheet - AWS Pro Path](https://tutorialsdojo.com/aws-certified-solutions-architect-professional)

[Free AWS Learning Plan](https://explore.skillbuilder.aws/learn/signin)

[Comparison of AWS Services](https://tutorialsdojo.com/comparison-of-aws-services)

- [API Gateway](apiGateway.md)
- [Application Discovery Service](applicationDiscoveryService.md)
- [Aurora](aurora.md)
- [Backup](backup.md)
- [Batch](batch.md)
- [Best Practices](bestPractices.md)
- [Billing And Cost Management](billingAndCostManagement.md)
- [Certificate Manager](certificateManager.md)
- [Cloud Adoption Readiness Tool (CART)](cloudAdoptionReadinessTool.md)
- [CloudFormation](cloudFormation.md)
- [CloudFront](cloudFront.md)
- [CloudHSM](cloudHSM.md)
- [CloudSearch](cloudsearch.md)
- [CloudTrail](cloudTrail.md)
- [CloudWatch](cloudwatch.md)
- [CloudWatch Logs](cloudWatchLogs.md)
- [CodeBuild](codeBuild.md)
- [CodeCommit](codeCommit.md)
- [CodeDeploy](codeDeploy.md)
- [CodePipeline](codePipeline.md)
- [Cognito](cognito.md)
- [Command Line Interface](commandLineInterface.md)
- [Config](config.md)
- [Connect](connect.md)
- [Control Tower](awsControlTower.md)
- [Database Migration Service](dataMigrationService.md)
- [Developer Tools Console](developerToolsConsole.md)
- [Direct Connect](directConnect.md)
- [DynamoDB](dynamodb.md)
- [EC2](ec2.md)
- [EC2 Auto Scaling](ec2AutoScaling.md)
- [ECS](ecs.md)
- [Elastic Beanstalk](elasticBeanStalk.md)
- [EventBridge](eventBridge.md)
- [ElastiCache](elasticCache.md)
- [Elastic File System](elasticFileSystem.md)
- [Elastic Load Balancing](elasticLoadBalancing.md)
- [GuardDuty](guardDuty.md)
- [IAM](iam.md)
- [Inspector](inspector.md)
- [Kinesis](kinesis.md)
- [Lambda](lambda.md)
- [Lex](lex.md)
- [Macie](macie.md)
- [Mechanical Turk](mechanicalTurk.md)
- [Migration Hub](migrationHub.md)
- [OpsWorks](opsworks.md)
- [Organizations](organizations.md)
- [QuickSight](quickSight.md)
- [RDS](rds.md)
- [Redshift](redShift.md)
- [Rekognition](rekognition.md)
- [Resource Access Manager](resourceAccessManager.md)
- [Route53](route53.md)
- [S3](s3.md)
- [SageMaker](sageMaker.md)
- [Schema Conversion Tool](schemaConversionTool.md)
- [Secrets Manager](secretsManager.md)
- [Security Token Service](securityTokenService.md)
- [Serverless Application Model](serverlessApplicationModel.md)
- [Server Migration Service](serverMigrationService.md)
- [Service Catalog](serviceCatalog.md)
- [Shield](shield.md)
- [Simple Notification Service](simpleNotificationService.md)
- [Simple Queue Service](simpleQueueService.md)
- [Simple Workflow Service](simpleWorkflowService.md)
- [Single Sign-On](singleSignOn.md)
- [Snowball](snowball.md)
- [Snowball Edge](snowballEdge.md)
- [Storage Gateway](storageGateway.md)
- [Systems Manager](systemManager.md)
- [Transcribe](transcribe.md)
- [VPC](vpc.md)
- [WAF](waf.md)
- [Web Identity Federation](webIdentifyFederation.md)
- [Well-Architected](wellArchitected.md)
- [Whitepapers](whitepapers.md)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/apiGateway.md
================================================
# API Gateway

- [CheatSheet - API Gateway](https://tutorialsdojo.com/amazon-api-gateway)
- [CheatSheet - How to invalidate API gateway cache](https://tutorialsdojo.com/how-to-invalidate-api-gateway-cache)
- [FAQs - API Gateway](https://aws.amazon.com/api-gateway/faqs)

## Enabling API caching to enhance responsiveness

[Enabling API caching to enhance responsiveness](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-caching.html)

- You can enable API caching in Amazon API Gateway to cache your endpoint's responses.
- With caching, you can reduce the number of calls made to your endpoint and also improve the latency of requests to your API.
- When you enable caching for a stage, API Gateway caches responses from your endpoint for a specified time-to-live (TTL) period, in seconds.
- API Gateway then responds to the request by looking up the endpoint response from the cache instead of making a request to your endpoint.
- The default TTL value for API caching is 300 seconds. The maximum TTL value is 3600 seconds. TTL=0 means caching is disabled.

## Working with WebSocket APIs

[Working with WebSocket APIs](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api.html)

- A WebSocket API in API Gateway is a collection of WebSocket routes that are integrated with backend HTTP endpoints, Lambda functions, or other AWS services.
- API Gateway WebSocket APIs are bidirectional. A client can send messages to a service, and services can independently send messages to clients.
- This bidirectional behavior enables richer client/service interactions because services can push data to clients without requiring clients to make an explicit request.
- WebSocket APIs are often used in real-time applications such as chat applications, collaboration platforms, multiplayer games, and financial trading platforms.
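For example, a WebSocket API can be created from the CLI (a minimal sketch; the API name and route selection expression are illustrative):

```bash
# Create a WebSocket API whose routes are selected by the "action"
# field of each JSON message the client sends
aws apigatewayv2 create-api \
  --name my-chat-api \
  --protocol-type WEBSOCKET \
  --route-selection-expression '$request.body.action'
```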
### Use @connections commands in your backend service [Use @connections commands in your backend service](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-how-to-call-websocket-api-connections.html) Your backend service can use the following WebSocket connection HTTP requests to send a callback message to a connected client, get connection information, or disconnect the client ```bash POST https://{api-id}.execute-api.us-east-1.amazonaws.com/{stage}/@connections/{connection_id} ``` ### Lambda Integration #### Understand API Gateway Lambda proxy integration [Understand API Gateway Lambda proxy integration](https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html) - Amazon API Gateway Lambda proxy integration is a simple, powerful, and nimble mechanism to build an API with a setup of a single API method - The Lambda proxy integration allows the client to call a single Lambda function in the backend. - The function accesses many resources or features of other AWS services, including calling other Lambda functions. ## Tutorials ### Tutorial: Create a REST API as an Amazon Kinesis proxy in API Gateway [Tutorial: Create a REST API as an Amazon Kinesis proxy in API Gateway](https://docs.aws.amazon.com/apigateway/latest/developerguide/integrating-api-with-aws-services-kinesis.html) ## Notes - Lambda can scale faster than the regular Auto Scaling feature of Amazon EC2, Amazon Elastic Beanstalk, or Amazon ECS. This is because AWS Lambda is more lightweight than other computing services ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/applicationDiscoveryService.md ================================================ # AWS Application Discovery Service [What Is AWS Application Discovery Service?](https://docs.aws.amazon.com/application-discovery/latest/userguide/what-is-appdiscovery.html) - AWS Application Discovery Service helps you plan your migration to the AWS cloud by collecting usage and configuration data about your on-premises servers. - Application Discovery Service is integrated with AWS Migration Hub, which simplifies your migration tracking as it aggregates your migration status information into a single console. - You can view the discovered servers, group them into applications, and then track the migration status of each application from the Migration Hub console in your home region. - All discovered data is stored in your AWS Migration Hub home region. - Therefore, you must set your home region in the Migration Hub console or with CLI commands before performing any discovery and migration activities. - Your data can be exported for analysis in Microsoft Excel or AWS analysis tools such as Amazon Athena and Amazon QuickSight. - Application Discovery Service offers two ways of performing discovery and collecting data about your on-premises servers: - Agentless discovery can be performed by deploying the AWS Agentless Discovery Connector (OVA file) through your VMware vCenter - Agent-based discovery can be performed by deploying the AWS Application Discovery Agent on each of your VMs and physical servers. 
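As a minimal sketch of the CLI side (assuming the `migrationhub-config` and `discovery` command namespaces; the export format shown is one of the supported values):

```bash
# Confirm which Migration Hub home region is configured for this account
aws migrationhub-config get-home-region

# Export collected discovery data (for example, for analysis in Athena or Excel)
aws discovery start-export-task --export-data-format CSV
```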
================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/aurora.md ================================================ # Aurora [Cheat Sheet - RDS](https://tutorialsdojo.com/amazon-relational-database-service-amazon-rds) [What is Amazon Aurora?](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) - Amazon Aurora (Aurora) is a fully managed relational database engine that's compatible with MySQL and PostgreSQL - The code, tools, and applications you use today with your existing MySQL and PostgreSQL databases can be used with Aurora. - With some workloads, Aurora can deliver up to five times the throughput of MySQL and up to three times the throughput of PostgreSQL without requiring changes to most of your existing applications. ## Amazon Aurora DB clusters [Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.html) - An Amazon Aurora DB cluster consists of one or more DB instances and a cluster volume that manages the data for those DB instances. - An Aurora cluster volume is a virtual database storage volume that spans multiple Availability Zones, with each Availability Zone having a copy of the DB cluster data. - Primary DB instance – Supports read and write operations, and performs all of the data modifications to the cluster volume. Each Aurora DB cluster has one primary DB instance. - Aurora Replica – Connects to the same storage volume as the primary DB instance and supports only read operations. Each Aurora DB cluster can have up to 15 Aurora Replicas in addition to the primary DB instance ### Amazon Aurora storage and reliability [Amazon Aurora storage and reliability](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html) - Aurora data is stored in the cluster volume, which is a single, virtual volume that uses solid state drives (SSDs). - A cluster volume consists of copies of the data across three Availability Zones in a single AWS Region - Aurora cluster volumes automatically grow as the amount of data in your database increases. - An Aurora cluster volume can grow to a maximum size of 128 tebibytes (TiB). - Even though an Aurora cluster volume can grow up to 128 tebibytes (TiB), you are only charged for the space that you use in an Aurora cluster volume. ### High availability for Amazon Aurora [High availability for Amazon Aurora](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.AuroraHighAvailability.html) - The Amazon Aurora architecture involves separation of storage and compute - The data remains safe even if some or all of the DB instances in the cluster become unavailable. ## Managing DB instance [Working with storage for Amazon RDS DB instances](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html) - To specify how you want your data stored in Amazon RDS, choose a storage type and provide a storage size when you create or modify a DB instance. - Later, you can increase the amount or change the type of storage by modifying the DB instance. 
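A minimal sketch of such a modification (the instance identifier, size, and storage type are illustrative):

```bash
# Grow allocated storage and change the storage type on an existing DB instance
aws rds modify-db-instance \
  --db-instance-identifier mydb \
  --allocated-storage 200 \
  --storage-type gp3 \
  --apply-immediately
```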
### Overview of Amazon Aurora global databases

[Overview of Amazon Aurora global databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database-overview)

- Amazon Aurora global databases span multiple AWS Regions, enabling low-latency global reads and providing fast recovery from the rare outage that might affect an entire AWS Region.
- An Aurora global database has a primary DB cluster in one Region, and up to five secondary DB clusters in different Regions.

### Connecting to an Amazon Aurora global database

How you connect to an Aurora global database depends on whether you need to write to the database or read from the database:

- For read-only requests or queries, you connect to the reader endpoint for the Aurora cluster in your AWS Region.
- To run data manipulation language (DML) or data definition language (DDL) statements, you connect to the cluster endpoint for the primary cluster. This endpoint might be in a different AWS Region than your application.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/awsControlTower.md
================================================
# Control Tower

- AWS Organizations is an account management service that lets you consolidate multiple AWS accounts into an organization that you create and centrally manage.
- With Organizations, you can create member accounts and invite existing accounts to join your organization.
- You can organize those accounts into groups and attach policy-based controls.
- In AWS Control Tower, Organizations helps centrally manage billing; control access, compliance, and security; and share resources across your member AWS accounts.
- Accounts are grouped into logical groups, called organizational units (OUs).

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/backup.md
================================================
# AWS Backup

## Restoring a Storage Gateway volume

[Restoring a Storage Gateway volume](https://docs.aws.amazon.com/aws-backup/latest/devguide/restoring-storage-gateway.html)

- If you are restoring an AWS Storage Gateway volume snapshot, you can choose to restore the snapshot as a Storage Gateway volume or as an Amazon EBS volume.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/batch.md
================================================
# Batch

[What Is AWS Batch?](https://docs.aws.amazon.com/batch/latest/userguide/what-is-batch.html)

[Cheat Sheet - AWS Batch](https://tutorialsdojo.com/aws-batch)

- Helps you run batch computing workloads on the AWS Cloud.
- Is a common way for developers, scientists, and engineers to access large amounts of compute resources.
- Removes the undifferentiated heavy lifting of configuring and managing the required infrastructure, similar to traditional batch computing software.
- This service can efficiently provision resources in response to jobs submitted in order to eliminate capacity constraints, reduce compute costs, and deliver results quickly.
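For example, work is submitted as jobs against a job queue (a sketch; the job, queue, and job definition names are illustrative):

```bash
# Submit a containerized job to an existing job queue, referencing
# a registered job definition as name:revision
aws batch submit-job \
  --job-name nightly-render \
  --job-queue default-queue \
  --job-definition render-job:1
```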
## Compute environment

[Compute environment](https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)

- Job queues are mapped to one or more compute environments.
- Compute environments contain the Amazon ECS container instances that are used to run containerized batch jobs.

## Use Cases

[AWS Batch Use cases](https://aws.amazon.com/batch/use-cases)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/bestPractices.md
================================================
# Best Practices

## Migration Strategies

[CheatSheet - aws-migration-strategies-the-6-rs](https://tutorialsdojo.com/aws-migration-strategies-the-6-rs)

[Phase 2: Plan](https://docs.aws.amazon.com/prescriptive-guidance/latest/strategy-database-migration/planning-phase.html)

6 Application Migration Strategies: “The 6 R’s”

- Rehosting: Otherwise known as `lift-and-shift`.
  - In a large legacy migration scenario where the organization is looking to scale its migration quickly to meet a business case, we find that the majority of applications are rehosted.
- Replatforming: Sometimes called `lift-tinker-and-shift`.
  - Here you might make a few cloud (or other) optimizations in order to achieve some tangible benefit, but you aren't otherwise changing the core architecture of the application.
  - You may be looking to reduce the amount of time you spend managing database instances by migrating to a database-as-a-service platform like Amazon Relational Database Service (Amazon RDS), or migrating your application to a fully managed platform like AWS Elastic Beanstalk.
- Repurchasing: Moving to a different product.
  - Repurchasing is most commonly a move to a SaaS platform.
- Refactoring / Re-architecting: Re-imagining how the application is architected and developed, typically using cloud-native features.
  - This is typically driven by a strong business need to add features, scale, or performance that would otherwise be difficult to achieve in the application's existing environment.
- Retire: Get rid of.
  - Once you've discovered everything in your environment, you might ask each functional area who owns each application.
  - We've found that as much as 10% (sometimes 20%) of an enterprise IT portfolio is no longer useful and can simply be turned off.
- Retain: Usually this means "revisit" or do nothing (for now).
  - Maybe you're still riding out some depreciation, aren't ready to prioritize an application that was recently upgraded, or are otherwise not inclined to migrate some applications.
- You should only migrate what makes sense for the business.

## Disaster Recovery

[Disaster Recovery Slides](https://www.slideshare.net/AmazonWebServices/disaster-recovery-options-with-aws)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/billingAndCostManagement.md
================================================
# Billing And Cost Management

[What is AWS Billing?](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-what-is.html)

[CheatSheet - AWS Billing And Cost Management](https://tutorialsdojo.com/aws-billing-and-cost-management)

## Consolidated billing for AWS Organizations

### Reserved Instances

#### Turning off Reserved Instance and Savings Plans discount sharing

- The management account of an organization can turn off Reserved Instance (RI) discount and Savings Plans discount sharing for any accounts in that organization, including the management account.
- This means that RIs and Savings Plans discounts aren't shared between any accounts that have sharing turned off.
- To share an RI or Savings Plans discount with an account, both accounts must have sharing turned on.

[Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)

- After you activate cost allocation tags, AWS uses them to organize your resource costs on your cost allocation report, making it easier to categorize and track your AWS costs.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/certificateManager.md
================================================
# Certificate Manager

[AWS Certificate Manager](https://aws.amazon.com/certificate-manager)

- AWS Certificate Manager is a service that lets you easily provision, manage, and deploy public and private Secure Sockets Layer/Transport Layer Security (SSL/TLS) certificates for use with AWS services and your internal connected resources.
- SSL/TLS certificates are used to secure network communications and establish the identity of websites over the Internet as well as resources on private networks.

> To use a certificate with Elastic Load Balancing for the same site (the same fully qualified domain name, or FQDN, or set of FQDNs) in a different Region, you must request a new certificate for each Region in which you plan to use it.

> To use an ACM certificate with Amazon CloudFront, you must request the certificate in the US East (N. Virginia) region.

### Services integrated with AWS Certificate Manager

[Services integrated with AWS Certificate Manager](https://docs.aws.amazon.com/acm/latest/userguide/acm-services.html)

## FAQs

[FAQs](https://aws.amazon.com/certificate-manager/faqs)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/cloudAdoptionReadinessTool.md
================================================
# AWS Cloud Adoption Readiness Tool (CART)

[AWS Cloud Adoption Readiness Tool (CART)](https://cloudreadiness.amazonaws.com/#/cart)

- Assess your organization’s cloud migration readiness with sixteen questions and set a path for cloud adoption success.
- The AWS Cloud Adoption Readiness Tool (CART) helps organizations of all sizes develop efficient and effective plans for cloud adoption and enterprise cloud migrations.
- This 16-question online survey and assessment report details your cloud migration readiness across six perspectives including business, people, process, platform, operations, and security.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/cloudFormation.md
================================================
# CloudFormation

[What is AWS CloudFormation?](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/Welcome.html)

[Cheat Sheet - AWS CloudFormation](https://tutorialsdojo.com/aws-cloudformation)

[Cheat Sheet - aws-cloudformation-stacksets-and-nested-stacks](https://tutorialsdojo.com/aws-cloudformation-stacksets-and-nested-stacks)

[Cheat Sheet - Elastic Beanstalk vs CloudFormation vs OpsWorks vs CodeDeploy](https://tutorialsdojo.com/elastic-beanstalk-vs-cloudformation-vs-opsworks-vs-codedeploy)

- AWS CloudFormation is a service that helps you model and set up your AWS resources so that you can spend less time managing those resources and more time focusing on your applications that run in AWS.
- You create a template that describes all the AWS resources that you want (like Amazon EC2 instances or Amazon RDS DB instances), and CloudFormation takes care of provisioning and configuring those resources for you.

### Working with AWS CloudFormation StackSets

[AWS CloudFormation StackSets](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/what-is-cfnstacksets.html)

- AWS CloudFormation StackSets extends the functionality of stacks by enabling you to create, update, or delete stacks across multiple accounts and Regions with a single operation.

### Updating stacks using change sets

[Updating stacks using change sets](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)

- When you need to update a stack, understanding how your changes will affect running resources before you implement them can help you update stacks with confidence.
- Change sets allow you to preview how proposed changes to a stack might impact your running resources, for example, whether your changes will delete or replace any critical resources.
- AWS CloudFormation makes the changes to your stack only when you decide to execute the change set, allowing you to decide whether to proceed with your proposed changes or explore other changes by creating another change set.

```bash
# Create a change set for an existing stack (stack and file names illustrative)
aws cloudformation create-change-set --stack-name my-stack --change-set-name my-changes --template-body file://template.yaml
```

### Conditionally create resources for a production, development, or test stack

[Conditionally create resources for a production, development, or test stack](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-sample-templates.html)

- In some cases, you might want to create stacks that are similar but with minor tweaks.
- For example, you might have a template that you use for production applications.
- You want to create the same production stack so that you can use it for development or testing.

### Exporting stack output values

[Exporting stack output values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-stack-exports.html)

- To share information between stacks, export a stack's output values.
- Other stacks that are in the same AWS account and region can import the exported values.
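For instance, the output values currently exported in a region can be listed with the CLI (a minimal sketch):

```bash
# List every value exported by stacks in this account and region;
# other stacks import these values with Fn::ImportValue
aws cloudformation list-exports
```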
### Listing stacks that import an exported output value [Listing stacks that import an exported output value](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-stack-imports.html) - When you export an output value, stacks that are in the same AWS account and region can import that value. - To see which stacks are importing a particular output value, use the list import action. ### DeletionPolicy attribute [DeletionPolicy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-deletionpolicy.html) - With the DeletionPolicy attribute you can preserve, and in some cases, backup a resource when its stack is deleted. - You specify a DeletionPolicy attribute for each resource that you want to control. - If a resource has no DeletionPolicy attribute, AWS CloudFormation deletes the resource by default. > The default policy is Snapshot for AWS::RDS::DBCluster resources and for AWS::RDS::DBInstance resources that don't specify the DBClusterIdentifier property. **DeletionPolicy options** - Delete - CloudFormation deletes the resource and all its content if applicable during stack deletion - By default, if you don't specify a DeletionPolicy, CloudFormation deletes your resources. - Retain - CloudFormation keeps the resource without deleting the resource or its contents when its stack is deleted. - Snapshot - For resources that support snapshots, CloudFormation creates a snapshot for the resource before deleting it ### UpdateReplacePolicy attribute [UpdateReplacePolicy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatereplacepolicy.html) - Use the UpdateReplacePolicy attribute to retain or, in some cases, backup the existing physical instance of a resource when it's replaced during a stack update operation. - When you initiate a stack update, AWS CloudFormation updates resources based on differences between what you submit and the stack's current template and parameters. - If you update a resource property that requires that the resource be replaced, CloudFormation recreates the resource during the update. - Recreating the resource generates a new physical ID. ### Walkthrough: Refer to resource outputs in another AWS CloudFormation stack [Walkthrough: Refer to resource outputs in another AWS CloudFormation stack](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/walkthrough-crossstackref.html) - To export resources from one AWS CloudFormation stack to another, create a cross-stack reference. Cross-stack references let you use a layered or service-oriented architecture. - Instead of including all resources in a single stack, you create related AWS resources in separate stacks; then you can refer to required resource outputs from other stacks. - By restricting cross-stack references to outputs, you control the parts of a stack that are referenced by other stacks. - There are some limitations if there is a cross-stack reference between two CloudFormation stacks. Stack A cannot be deleted if it has a resource output that is referenced by stack B. - You cannot modify the output value that is referenced by another stack - you can update stack B to remove the cross-stack reference. ### Custom resources [Custom resources](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-custom-resources.html) - Custom resources enable you to write custom provisioning logic in templates that AWS CloudFormation runs anytime you create, update (if you changed the custom resource), or delete stacks. 
- For example, you might want to include resources that aren't available as AWS CloudFormation resource types. - You can include those resources by using custom resources. **How custom resources work** - The template developer defines a custom resource in their template, which includes a service token and any input data parameters. Depending on the custom resource, the input data might be required; however, the service token is always required. - The service token specifies where AWS CloudFormation sends requests to, such as an Amazon SNS topic ARN or an AWS Lambda function ARN ## Intrinsic Functions [Intrinsic function reference](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference.html) - AWS CloudFormation provides several built-in functions that help you manage your stacks. Use intrinsic functions in your templates to assign values to properties that are not available until runtime. ### Using dynamic references to specify template values [Using dynamic references to specify template values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html) - Dynamic references provide a compact, powerful way for you to specify external values that are stored and managed in other services, such as the Systems Manager Parameter Store, in your stack templates. - When you use a dynamic reference, CloudFormation retrieves the value of the specified reference when necessary during stack and change set operations. - CloudFormation currently supports the following dynamic reference patterns: - ssm, for plaintext values stored in AWS Systems Manager Parameter Store. - ssm-secure, for secure strings stored in AWS Systems Manager Parameter Store. - secretsmanager, for entire secrets or specific secret values that are stored in AWS Secrets Manager. ### Fn::GetAtt [Fn::GetAtt](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-getatt.html) - The Fn::GetAtt intrinsic function returns the value of an attribute from a resource in the template ## Blogs [Use CloudFormation StackSets to Provision Resources Across Multiple AWS Accounts and Regions](https://aws.amazon.com/blogs/aws/use-cloudformation-stacksets-to-provision-resources-across-multiple-aws-accounts-and-regions) ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/cloudFront.md ================================================ # Cloudfront [Cheat Sheet - Cloudfront](https://tutorialsdojo.com/amazon-cloudfront) [geoproximity-routing-vs-geolocation-routing](https://tutorialsdojo.com/latency-routing-vs-geoproximity-routing-vs-geolocation-routing) ## What is Amazon CloudFront? [What is Amazon CloudFront?](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Introduction.html) - Amazon CloudFront is a web service that speeds up distribution of your static and dynamic web content, such as .html, .css, .js, and image files, to your users. - CloudFront delivers your content through a worldwide network of data centers called edge locations. - When a user requests content that you're serving with CloudFront, the request is routed to the edge location that provides the lowest latency (time delay), so that content is delivered with the best possible performance. - If the content is already in the edge location with the lowest latency, CloudFront delivers it immediately. 
- If the content is not in that edge location, CloudFront retrieves it from an origin that you've defined—such as an Amazon S3 bucket, a MediaPackage channel, or an HTTP server (for example, a web server) that you have identified as the source for the definitive version of your content. ## Restricting the geographic distribution of your content [Restricting the geographic distribution of your content](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/georestrictions.html) - You can use geo restriction, also known as geo blocking, to prevent users in specific geographic locations from accessing content that you're distributing through a CloudFront distribution. - To use geo restriction, you have two options: - Use the CloudFront geo restriction feature. - Use this option to restrict access to all of the files that are associated with a distribution and to restrict access at the country level. - Use a third-party geolocation service. Use this option to restrict access to a subset of the files that are associated with a distribution or to restrict access at a finer granularity than the country level. ## Optimizing Caching and availability ### Optimizing high availability with CloudFront origin failover [Optimizing high availability with CloudFront origin failover](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/high_availability_origin_failover.html) - You can set up CloudFront with origin failover for scenarios that require high availability. - To get started, you create an origin group with two origins: a primary and a secondary. - If the primary origin is unavailable, or returns specific HTTP response status codes that indicate a failure, CloudFront automatically switches to the secondary origin. ## Configuring secure access and restricting access to content ### Using field-level encryption to help protect sensitive data [Using field-level encryption to help protect sensitive data](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/field-level-encryption.html) - With Amazon CloudFront, you can enforce secure end-to-end connections to origin servers by using HTTPS - Field-level encryption adds an additional layer of security that lets you protect specific data throughout system processing so that only certain applications can see it. - Field-level encryption allows you to enable your users to securely upload sensitive information to your web servers. - The sensitive information provided by your users is encrypted at the edge, close to the user, and remains encrypted throughout your entire application stack. - This encryption ensures that only applications that need the data and have the credentials to decrypt it are able to do so. - To use field-level encryption, when you configure your CloudFront distribution, specify the set of fields in POST requests that you want to be encrypted, and the public key to use to encrypt them. 
- You can encrypt up to 10 data fields in a request ### Using AWS WAF to control access to your content [Using AWS WAF to control access to your content](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-awswaf.html) - AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content - Based on conditions that you specify, such as the values of query strings or the IP addresses that requests originate from, CloudFront responds to requests either with the requested content or with an HTTP status code 403 (Forbidden). ### Restricting access to Amazon S3 content by using an origin access identity (OAI) [Restricting access to Amazon S3 content by using an origin access identity (OAI)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html) To restrict access to content that you serve from Amazon S3 buckets, follow these steps: - Create a special CloudFront user called an origin access identity (OAI) and associate it with your distribution - Configure your S3 bucket permissions so that CloudFront can use the OAI to access the files in your bucket and serve them to your users. Make sure that users can’t use a direct URL to the S3 bucket to access a file there. ### Restricting access to files on custom origins [Restricting access to files on custom origins](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-overview.html#forward-custom-headers-restrict-access) - If you use a custom origin, you can optionally set up custom headers to restrict access. - But by using custom headers, you can further restrict access to your content so that users can access it only through CloudFront, not directly. - To require that users access content through CloudFront, change the following settings in your CloudFront distributions: - Origin Custom Headers: Configure CloudFront to forward custom headers to your origin. ## Serving private content with signed URLs and signed cookies [Serving private content with signed URLs and signed cookies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) - Many companies that distribute content over the internet want to restrict access to documents, business data, media streams, or content that is intended for selected users, for example, users who have paid a fee. - Require that your users access your private content by using special CloudFront signed URLs or signed cookies. - Require that your users access your content by using CloudFront URLs, not URLs that access content directly on the origin server (for example, Amazon S3 or a private HTTP server). ## Optimizing caching and availability ### Increasing the proportion of requests that are served directly from the CloudFront caches (cache hit ratio) [Increasing the proportion of requests that are served directly from the CloudFront caches (cache hit ratio)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cache-hit-ratio.html) - You can improve performance by increasing the proportion of your viewer requests that are served directly from the CloudFront cache instead of going to your origin servers for content. - This is known as improving the cache hit ratio. 
- Specifying how long CloudFront caches your objects - To increase your cache hit ratio, you can configure your origin to add a Cache-Control max-age directive to your objects, and specify the longest practical value for max-age ### Requiring HTTPS for communication between viewers and CloudFront [Requiring HTTPS for communication between viewers and CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-https-viewers-to-cloudfront.html) - You can configure one or more cache behaviors in your CloudFront distribution to require HTTPS for communication between viewers and CloudFront. - You also can configure one or more cache behaviors to allow both HTTP and HTTPS, so that CloudFront requires HTTPS for some objects but not for others. - The configuration steps depend on which domain name you're using in object URLs: - If you're using the domain name that CloudFront assigned to your distribution, such as d111111abcdef8.cloudfront.net, you change the Viewer Protocol Policy setting for one or more cache behaviors to require HTTPS communication. In that configuration, CloudFront provides the SSL/TLS certificate. - If you're using your own domain name, such as example.com, you need to change several CloudFront settings. You also need to use an SSL/TLS certificate provided by AWS Certificate Manager (ACM), or import a certificate from a third-party certificate authority into ACM or the IAM certificate store. ### Managing how long content stays in the cache (expiration) [Managing how long content stays in the cache (expiration)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) - You can control how long your files stay in a CloudFront cache before CloudFront forwards another request to your origin - If the CloudFront cache already has the latest version, the origin returns a status code 304 Not Modified. - If the CloudFront cache does not have the latest version, the origin returns a status code 200 OK and the latest version of the file. ## Customizing at the edge with Lambda@Edge [Customizing at the edge with Lambda@Edge](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-at-the-edge.html) - Lambda@Edge is an extension of AWS Lambda, a compute service that lets you execute functions that customize the content that CloudFront delivers. - When you associate a CloudFront distribution with a Lambda@Edge function, CloudFront intercepts requests and responses at CloudFront edge locations. 
You can execute Lambda functions when the following CloudFront events occur: - When CloudFront receives a request from a viewer (viewer request) - Before CloudFront forwards a request to the origin (origin request) - When CloudFront receives a response from the origin (origin response) - Before CloudFront returns the response to the viewer (viewer response) [Get started creating and using Lambda@Edge functions](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-edge-how-it-works.html) ## Request and response behavior for custom origins ### User-Agent header [User-Agent header](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/RequestAndResponseBehaviorCustomOrigin.html#request-custom-user-agent-header) - If you want CloudFront to cache different versions of your objects based on the device that a user is using to view your content, we recommend that you configure CloudFront to forward one or more of the following headers to your custom origin: - CloudFront-Is-Desktop-Viewer - CloudFront-Is-Mobile-Viewer - CloudFront-Is-SmartTV-Viewer - CloudFront-Is-Tablet-Viewer ## Blogs [How do I use CloudFront to serve a static website hosted on Amazon S3?](https://aws.amazon.com/premiumsupport/knowledge-center/cloudfront-serve-static-website) ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/cloudHSM.md ================================================ # CloudHSM [AWS CloudHSM](https://aws.amazon.com/cloudhsm) - AWS CloudHSM is a cloud-based hardware security module (HSM) that enables you to easily generate and use your own encryption keys on the AWS Cloud ## Improve Your Web Server's Security with SSL/TLS Offload in AWS CloudHSM [Improve Your Web Server's Security with SSL/TLS Offload in AWS CloudHSM](https://docs.aws.amazon.com/cloudhsm/latest/userguide/ssl-offload.html) - Web servers and their clients (web browsers) can use Secure Sockets Layer (SSL) or Transport Layer Security (TLS). - These protocols confirm the identity of the web server and establish a secure connection to send and receive webpages or other data over the internet. This is commonly known as HTTPS. ### How SSL/TLS Offload with AWS CloudHSM Works [How SSL/TLS Offload with AWS CloudHSM Works](https://docs.aws.amazon.com/cloudhsm/latest/userguide/ssl-offload-overview.html) ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/cloudTrail.md ================================================ # CloudTrail [Cheat Sheet - AWS CloudTrail](https://tutorialsdojo.com/aws-cloudtrail) ## What Is AWS CloudTrail? [What Is AWS CloudTrail?](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-user-guide.html) - AWS CloudTrail is an AWS service that helps you enable governance, compliance, and operational and risk auditing of your AWS account. - Actions taken by a user, role, or an AWS service are recorded as events in CloudTrail. - Events include actions taken in the AWS Management Console, AWS Command Line Interface, and AWS SDKs and APIs. ### Global service events [Global service events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-global-service-events) - For most services, events are recorded in the region where the action occurred. 
- For global services such as AWS Identity and Access Management (IAM), AWS STS, and Amazon CloudFront, events are delivered to any trail **that includes global services**. ## Identities ### Logging IAM and AWS STS API calls with AWS CloudTrail [Logging IAM and AWS STS API calls with AWS CloudTrail](https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) - IAM and AWS STS are integrated with AWS CloudTrail, a service that provides a record of actions taken by an IAM user or role. - CloudTrail captures all API calls for IAM and AWS STS as events, including calls from the console and from API calls. - If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket. - If you don't configure a trail, you can still view the most recent events in the CloudTrail console in Event history ## Blogs - [How to Audit Cross-Account Roles Using AWS CloudTrail and Amazon CloudWatch Events](https://aws.amazon.com/blogs/security/how-to-audit-cross-account-roles-using-aws-cloudtrail-and-amazon-cloudwatch-events) ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/cloudWatchLogs.md ================================================ # CloudWatch Logs - You can use Amazon CloudWatch Logs to monitor, store, and access your log files from Amazon Elastic Compute Cloud (Amazon EC2) instances, AWS CloudTrail, Route 53, and other sources. - CloudWatch Logs enables you to centralize the logs from all of your systems, applications, and AWS services that you use, in a single, highly scalable service. - CloudWatch Logs enables you to see all of your logs, regardless of their source, as a single and consistent flow of events ordered by time, and you can query them and sort them based on other dimensions, group them by specific fields, create custom computations with a powerful query language, and visualize log data in dashboards. ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/cloudsearch.md ================================================ # CloudSearch [Amazon CloudSearch](https://aws.amazon.com/cloudsearch) [Cheat Sheet - Amazon CloudSearch](https://tutorialsdojo.com/amazon-cloudsearch) - Amazon CloudSearch is a managed service in the AWS Cloud that makes it simple and cost-effective to set up, manage, and scale a search solution for your website or application. ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/cloudwatch.md ================================================ # CloudWatch [Cheat Sheet - CloudWatch](https://tutorialsdojo.com/amazon-cloudwatch) [cloudwatch-agent-vs-ssm-agent-vs-custom-daemon-scripts](https://tutorialsdojo.com/cloudwatch-agent-vs-ssm-agent-vs-custom-daemon-scripts) ## Schedule Expressions for Rules [Schedule Expressions for Rules](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html) - You can create rules that self-trigger on an automated schedule in CloudWatch Events using cron or rate expressions. - All scheduled events use UTC time zone and the minimum precision for schedules is 1 minute. 
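As a rough sketch of the two expression styles (the rule names here are hypothetical), the equivalent AWS CLI calls might look like this:

```bash
# Rate expression: a rule that self-triggers every 5 minutes.
aws events put-rule \
  --name every-5-minutes-rule \
  --schedule-expression "rate(5 minutes)"

# Cron expression: a rule that fires every day at 12:00 UTC
# (all scheduled events use the UTC time zone).
aws events put-rule \
  --name daily-noon-utc-rule \
  --schedule-expression "cron(0 12 * * ? *)"
```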
## Analyzing log data with CloudWatch Logs Insights [Analyzing log data with CloudWatch Logs Insights](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html) - CloudWatch Logs Insights enables you to interactively search and analyze your log data in Amazon CloudWatch Logs - You can perform queries to help you more efficiently and effectively respond to operational issues - If an issue occurs, you can use CloudWatch Logs Insights to identify potential causes and validate deployed fixes ### Installing the CloudWatch agent on on-premises servers [Installing the CloudWatch agent on on-premises servers](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/install-CloudWatch-Agent-on-premise.html) - If you have downloaded the CloudWatch agent on one computer and created the agent configuration file you want, you can use that configuration file to install the agent on other on-premises servers. ## Using Amazon CloudWatch alarms [Using Amazon CloudWatch alarms](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html) - You can create both metric alarms and composite alarms in CloudWatch. - A metric alarm - watches a single CloudWatch metric or the result of a math expression based on CloudWatch metrics. - The alarm performs one or more actions based on the value of the metric or expression relative to a threshold over a number of time periods. - The action can be sending a notification to an Amazon SNS topic, performing an Amazon EC2 action or an Amazon EC2 Auto Scaling action, or creating an OpsItem or incident in Systems Manager - A composite alarm - includes a rule expression that takes into account the alarm states of other alarms that you have created. - The composite alarm goes into ALARM state only if all conditions of the rule are met. - The alarms specified in a composite alarm's rule expression can include metric alarms and other composite alarms. ## Creating metrics from log events using filters - You can search and filter the log data coming into CloudWatch Logs by creating one or more metric filters. - Metric filters define the terms and patterns to look for in log data as it is sent to CloudWatch Logs. - CloudWatch Logs uses these metric filters to turn log data into numerical CloudWatch metrics that you can graph or set an alarm on. ### Creating metric filters [Creating metric filters](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/MonitoringPolicyExamples.html) [Monitoring deployments with Amazon CloudWatch Events](https://docs.aws.amazon.com/codedeploy/latest/userguide/monitoring-cloudwatch-events.html) - You can use Amazon CloudWatch Events to detect and react to changes in the state of an instance or a deployment (an "event") in your CodeDeploy operations. - Then, based on rules you create, CloudWatch Events will invoke one or more target actions when a deployment or instance enters the state you specify in a rule. - Depending on the type of state change, you might want to send notifications, capture state information, take corrective action, initiate events, or take other actions. 
- You can select the following types of targets when using CloudWatch Events as part of your CodeDeploy operations:
  - AWS Lambda functions
  - Kinesis streams
  - Amazon SQS queues
  - Built-in targets (EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call)
  - Amazon SNS topics
- The following are some use cases:
  - Use a Lambda function to pass a notification to a Slack channel whenever deployments fail.
  - Push data about deployments or instances to a Kinesis stream to support comprehensive, real-time status monitoring.
  - Use CloudWatch alarm actions to automatically stop, terminate, reboot, or recover Amazon EC2 instances when a deployment or instance event you specify occurs.

[dynamic-dns-for-route-53](https://aws.amazon.com/blogs/compute/building-a-dynamic-dns-for-route-53-using-cloudwatch-events-and-lambda/)

[Real-time processing of log data with subscriptions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Subscriptions.html)

- You can use subscriptions to get access to a real-time feed of log events from CloudWatch Logs and have it delivered to other services such as an Amazon Kinesis stream, an Amazon Kinesis Data Firehose stream, or AWS Lambda for custom processing, analysis, or loading to other systems.

[Sending and Receiving Events Between AWS Accounts](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEvents-CrossAccountEventDelivery.html)

The overall process is as follows:

- On the receiver account, edit the permissions on the default event bus to allow specified AWS accounts, an organization, or all AWS accounts to send events to the receiver account.
- On the sender account, set up one or more rules that have the receiver account's default event bus as the target.
- On the receiver account, set up one or more rules that match events that come from the sender account.

#### Creating metrics for log events using filters

##### Filter and Pattern Syntax

[Filter and pattern syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html)

- You can use metric filters to search for and match terms, phrases, or values in your log events. When a metric filter finds one of the terms, phrases, or values in your log events, you can increment the value of a CloudWatch metric. For example, you can create a metric filter to search for and count the occurrence of the word ERROR in your log events.
- When a metric filter finds one of the matching terms, phrases, or values in your log events, it increments the count in the CloudWatch metric by the amount you specify for Metric Value. The metric value is aggregated and reported every minute.

##### Example: Count HTTP 404 codes

- For Filter Pattern, type `[IP, UserInfo, User, Timestamp, RequestInfo, StatusCode=404, Bytes]`.

#### Publishing custom metrics

[Publishing custom metrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html)

[put-metric-data](https://docs.aws.amazon.com/cli/latest/reference/cloudwatch/put-metric-data.html)

- You can publish your own metrics to CloudWatch using the AWS CLI or an API. You can view statistical graphs of your published metrics with the AWS Management Console.
- Instead of calling put-metric-data multiple times for three data points that are within 3 seconds of each other, you can aggregate the data into a statistic set that you publish with one call, using the --statistic-values parameter.
```bash aws cloudwatch put-metric-data --metric-name PageViewCount --namespace MyService --statistic-values Sum=11,Minimum=2,Maximum=5,SampleCount=3 --timestamp 2016-10-14T12:00:00.000Z ``` ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/codeBuild.md ================================================ ### CodeBuild [Create a build project (console)](https://docs.aws.amazon.com/codebuild/latest/userguide/create-project-console.html) - We recommend that you store an environment variable with a sensitive value, such as an AWS access key ID, an AWS secret access key, or a password as a parameter in Amazon EC2 Systems Manager Parameter Store or AWS Secrets Manager. [Docker images provided by CodeBuild](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-available.html) [Extending AWS CodeBuild with Custom Build Environments](https://aws.amazon.com/blogs/devops/extending-aws-codebuild-with-custom-build-environments/) - Build environments are Docker images that include a complete file system with everything required to build and test your project. To use a custom build environment in a CodeBuild project, you build a container image for your platform that contains your build tools, push it to a Docker container registry such as Amazon EC2 Container Registry (ECR), and reference it in the project configuration. When building your application, CodeBuild will retrieve the Docker image from the container registry specified in the project configuration and use the environment to compile your source code, run your tests, and package your application. ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/codeCommit.md ================================================ > Revision Count: 1 # CodeCommit [Cheat Sheet - AWS CodeCommit](https://tutorialsdojo.com/aws-codecommit) ## Manage triggers for a repository [Manage triggers for an AWS CodeCommit repository](https://docs.aws.amazon.com/codecommit/latest/userguide/how-to-notify.html) - You can configure a CodeCommit repository so that code pushes or other events trigger actions, such as sending a notification from Amazon Simple Notification Service (Amazon SNS) or invoking a function in AWS Lambda. - You can create up to 10 triggers for each CodeCommit repository. - Data in AWS CodeCommit repositories is already encrypted in transit as well as at rest. ### Example: Create an AWS CodeCommit trigger for an Amazon SNS topic [Example: Create an AWS CodeCommit trigger for an Amazon SNS topic](https://docs.aws.amazon.com/codecommit/latest/userguide/how-to-notify-sns.html) - You can create a trigger for a CodeCommit repository so that events in that repository trigger notifications from an Amazon Simple Notification Service (Amazon SNS) topic ### Example: Create an AWS CodeCommit trigger for an AWS Lambda function [Example: Create an AWS CodeCommit trigger for an AWS Lambda function](https://docs.aws.amazon.com/codecommit/latest/userguide/how-to-notify-lambda.html) - You can create a trigger for a CodeCommit repository so that events in the repository invoke a Lambda function. 
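As a minimal sketch of creating an SNS trigger like the one described above (the repository name, trigger name, and topic ARN are placeholders; note that `put-repository-triggers` replaces the repository's entire trigger list):

```bash
# Notify an SNS topic on every push to the main branch of MyDemoRepo.
aws codecommit put-repository-triggers \
  --repository-name MyDemoRepo \
  --triggers '[{"name":"MainPushTrigger","destinationArn":"arn:aws:sns:us-east-1:111111111111:my-topic","branches":["main"],"events":["all"]}]'
```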
### Example: auth-and-access-control-iam-identity-based-access-control

[auth-and-access-control-iam-identity-based-access-control](https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html#identity-based-policies-example-4)

- To restrict pushes to the master branch, allow pushes and merges only on references other than `refs/heads/master`:

```json
{
  "Effect": "Allow",
  "Action": [
    "codecommit:GitPush",
    "codecommit:Merge*"
  ],
  "Resource": [
    "arn:aws:codecommit:*:*:the-repo-name"
  ],
  "Condition": {
    "StringNotEquals": {
      "codecommit:References": [
        "refs/heads/master"
      ]
    }
  }
}
```

### Migrate a Git repository to AWS CodeCommit

[Share the CodeCommit repository](https://docs.aws.amazon.com/codecommit/latest/userguide/how-to-migrate-repository-existing.html#how-to-migrate-existing-share)

- When you create a repository in CodeCommit, two endpoints are generated: one for HTTPS connections and one for SSH connections.
- Both provide secure connections over a network.

### Using identity-based policies (IAM Policies) for CodeCommit

[Using identity-based policies (IAM Policies) for CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html)

- An account administrator can attach permissions policies to IAM identities (users, groups, and roles) to grant permissions to perform operations on CodeCommit resources.
- The AWS managed policy AWSCodeCommitPowerUser allows users access to CodeCommit but disallows the action of deleting CodeCommit repositories.

### Cross-account repository access: Actions for the administrator in AccountA

[Cross-account repository access: Actions for the administrator in AccountA](https://docs.aws.amazon.com/codecommit/latest/userguide/cross-account-administrator-a.html)

To allow users or groups in AccountB to access a repository in AccountA, an AccountA administrator must:

- Create a policy in AccountA that grants access to the repository.
- Create a role in AccountA that can be assumed by IAM users and groups in AccountB.
- Attach the policy to the role.

[Cross-account repository access: Actions for the administrator in AccountB](https://docs.aws.amazon.com/codecommit/latest/userguide/cross-account-administrator-b.html)

To allow users or groups in AccountB to access a repository in AccountA, the AccountB administrator must create a group in AccountB. This group must be configured with a policy that includes the `sts:AssumeRole` action, which allows group members to assume the role created by the AccountA administrator.

### Setup steps for SSH connections to AWS CodeCommit repositories on Linux, macOS, or Unix

[Setup steps for SSH connections to AWS CodeCommit repositories on Linux, macOS, or Unix](https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-ssh-unixes.html)

- After you upload the SSH public key for the IAM user, the user can establish SSH connections to the CodeCommit repositories.

#### Supported Operations

[Change branch settings in AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/how-to-change-branch.html)

- You can change which branch to use as the default branch in the AWS CodeCommit console or with the AWS CLI.
- For example, if you created your first commit using a Git client that set the default branch to master, you could create a branch named main, and then change the branch settings so that the new branch is set as the default branch for the repository (see the CLI sketch below).
- To change other branch settings, you can use Git from a local repo connected to the CodeCommit repository.
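A minimal sketch of the default-branch change from the CLI, reusing the branch names from the note above (the repository name and commit ID are placeholders):

```bash
# Create the new branch from an existing commit, then make it the default.
aws codecommit create-branch \
  --repository-name MyDemoRepo \
  --branch-name main \
  --commit-id <commit-id>

aws codecommit update-default-branch \
  --repository-name MyDemoRepo \
  --default-branch-name main
```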
### Merge a pull request in an AWS CodeCommit repository [Merge a pull request in an AWS CodeCommit repository](https://docs.aws.amazon.com/codecommit/latest/userguide/how-to-merge-pull-request.html) - After your code has been reviewed and all approval rules (if any) on the pull request have been satisfied, you can merge a pull request in one of several ways: - You can use the console to merge your source branch to the destination branch using one of the available merge strategies, which also closes the pull request. - You can use the AWS CLI to merge and close the pull request using the fast-forward, squash, or 3-way merge strategy. - On your local computer, you can use the git merge command to merge the source branch into the destination branch, and then push your merged code to the destination branch #### Working with pull requests [Edit or delete an approval rule for a pull request](https://docs.aws.amazon.com/codecommit/latest/userguide/how-to-edit-delete-pull-request-approval-rule.html) - When you have an approval rule on a pull request, you cannot merge that pull request until its conditions have been met. - You can change the approval rules for pull requests to make it easier to satisfy their conditions, or to increase the rigor of reviews. You can change the number of users who must approve a pull request. #### Working with approval rule templates [Working with approval rule templates](https://docs.aws.amazon.com/codecommit/latest/userguide/approval-rule-templates.html) - You can create approval rules for pull requests. To automatically apply approval rules to some or all of the pull requests created in repositories, use approval rule templates. Approval rule templates help you customize your development workflows across repositories so that different branches have appropriate levels of approvals and control. You can define different rules for production and development branches. Those rules are applied every time a pull request that matches the rule conditions is created. #### Working with branches [Limit pushes and merges to branches in AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/how-to-conditional-branch.html) - By default, any CodeCommit repository user who has sufficient permissions to push code to the repository can contribute to any branch in that repository. - For example, this policy denies pushing commits, merging branches, deleting branches, merging pull requests, and adding files to a branch named main and a branch named prod in a repository named MyDemoRepo: ```json { "Version": "2012-10-17", "Statement": [ { "Effect": "Deny", "Action": [ "codecommit:GitPush", "codecommit:DeleteBranch", "codecommit:PutFile", "codecommit:MergeBranchesByFastForward", "codecommit:MergeBranchesBySquash", "codecommit:MergeBranchesByThreeWay", "codecommit:MergePullRequestByFastForward", "codecommit:MergePullRequestBySquash", "codecommit:MergePullRequestByThreeWay" ], "Resource": "arn:aws:codecommit:us-east-2:111111111111:MyDemoRepo", "Condition": { "StringEqualsIfExists": { "codecommit:References": [ "refs/heads/main", "refs/heads/prod" ] }, "Null": { "codecommit:References": "false" } } } ] } ``` ##### Configuring notifications for events in an AWS CodeCommit repository [Configuring notifications for events in an AWS CodeCommit repository](https://docs.aws.amazon.com/codecommit/latest/userguide/how-to-repository-email.html) - You can set up notification rules for a repository so that repository users receive emails about the repository event types you specify. 
- Notifications are sent when events match the notification rule settings.
- You can create an Amazon SNS topic to use for notifications or use an existing one in your Amazon Web Services account.

#### Security

[Using identity-based policies (IAM Policies) for CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html#identity-based-policies-example-4)

- Identity-based policies demonstrate how an account administrator can attach permissions policies to IAM identities (users, groups, and roles) to grant permissions to perform operations on CodeCommit resources.
- Example

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "codecommit:BatchGetRepositories"
      ],
      "Resource": [
        "arn:aws:codecommit:us-east-2:111111111111:MyDestinationRepo",
        "arn:aws:codecommit:us-east-2:111111111111:MyDemo*"
      ]
    }
  ]
}
```

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/codeDeploy.md
================================================

## CodeDeploy

[Register an on-premises instance with CodeDeploy](https://docs.aws.amazon.com/codedeploy/latest/userguide/on-premises-instances-register.html)

[Use the register command (IAM user ARN) to register an on-premises instance](https://docs.aws.amazon.com/codedeploy/latest/userguide/instances-on-premises-register-instance.html)

The register command can create an IAM user for the server and register the server with CodeDeploy:

```bash
aws deploy register --instance-name AssetTag234AESDD --tags Key=Name,Value=CodeDeployDemo-OnPremise --region eu-west-1
```

[Manually remove on-premises instance tags from an on-premises instance](https://docs.aws.amazon.com/codedeploy/latest/userguide/on-premises-instances-operations-remove-tags.html)

Typically, you remove an on-premises instance tag from an on-premises instance when that tag is no longer being used, or when you want to remove the on-premises instance from any deployment groups that rely on that tag. You can use the AWS CLI or the AWS CodeDeploy console to remove on-premises instance tags from on-premises instances.

[Create a deployment group for an in-place deployment (console)]()

[Deployment configurations on an AWS Lambda compute platform](https://docs.aws.amazon.com/codedeploy/latest/userguide/deployment-configurations.html#deployment-configuration-lambda)

There are three ways traffic can shift during a deployment:

- Canary: Traffic is shifted in two increments. You can choose from predefined canary options that specify the percentage of traffic shifted to your updated Lambda function version in the first increment and the interval, in minutes, before the remaining traffic is shifted in the second increment.
- Linear: Traffic is shifted in equal increments with an equal number of minutes between each increment. You can choose from predefined linear options that specify the percentage of traffic shifted in each increment and the number of minutes between each increment.
- All-at-once: All traffic is shifted from the original Lambda function to the updated Lambda function version all at once.

### Working with Deployments

[Stop a deployment with CodeDeploy](https://docs.aws.amazon.com/codedeploy/latest/userguide/deployments-stop.html)

- You can use the CodeDeploy console, the AWS CLI, or the CodeDeploy APIs to stop deployments associated with your AWS account.
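A minimal sketch of stopping a deployment from the CLI (the deployment ID is a placeholder); adding the rollback flag corresponds to the “Stop and roll back deployment” option described below:

```bash
# Stop an in-flight deployment and roll back to the previously deployed revision.
aws deploy stop-deployment \
  --deployment-id d-EXAMPLE111 \
  --auto-rollback-enabled
```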
[Redeploy and roll back a deployment with CodeDeploy](https://docs.aws.amazon.com/codedeploy/latest/userguide/deployments-rollback-and-redeploy.html#deployments-rollback-and-redeploy-manual-rollbacks)

- CodeDeploy rolls back deployments by redeploying a previously deployed revision of an application as a new deployment. These rolled-back deployments are technically new deployments, with new deployment IDs, rather than restored versions of a previous deployment.
- For an ongoing deployment, you can choose “Stop deployment” or “Stop and roll back deployment”.

[Working with deployment configurations in CodeDeploy](https://docs.aws.amazon.com/codedeploy/latest/userguide/deployment-configurations.html)

- A deployment configuration is a set of rules and success and failure conditions used by CodeDeploy during a deployment. These rules and conditions are different, depending on whether you deploy to an EC2/On-Premises compute platform, AWS Lambda compute platform, or Amazon ECS compute platform.

**Predefined deployment configurations for an EC2/on-premises compute platform**

Consider an example of nine instances:

- CodeDeployDefault.AllAtOnce
  - In-place deployments:
    - Attempts to deploy to all nine instances at once.
    - The overall deployment succeeds if deployment to even a single instance is successful.
    - It fails only if deployments to all nine instances fail.
  - Blue/green deployments:
    - Deployment to replacement environment: follows the same deployment rules as CodeDeployDefault.AllAtOnce for in-place deployments.
    - Traffic rerouting:
      - Routes traffic to all instances in the replacement environment at once.
      - Succeeds if traffic is successfully rerouted to at least one instance.
      - Fails after rerouting to all instances fails.
- CodeDeployDefault.HalfAtATime
- CodeDeployDefault.OneAtATime

**Deployment configurations on an AWS Lambda compute platform**

- When you deploy to an AWS Lambda compute platform, the deployment configuration specifies the way traffic is shifted to the new Lambda function versions in your application, using the canary, linear, or all-at-once options described above.

Predefined deployment configurations for an AWS Lambda compute platform (for the full list, refer to the documentation):

- CodeDeployDefault.LambdaCanary10Percent5Minutes: Shifts 10 percent of traffic in the first increment. The remaining 90 percent is deployed five minutes later.
- CodeDeployDefault.LambdaLinear10PercentEvery3Minutes: Shifts 10 percent of traffic every three minutes until all traffic is shifted.
- CodeDeployDefault.LambdaAllAtOnce: Shifts all traffic to the updated Lambda functions at once.
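Beyond the predefined configurations, you can define your own. A rough sketch (the config name is hypothetical; the JSON follows the CodeDeploy `TrafficRoutingConfig` shape):

```bash
# A custom Lambda canary: shift 15% of traffic first, the rest 10 minutes later.
aws deploy create-deployment-config \
  --deployment-config-name Custom.LambdaCanary15Percent10Minutes \
  --compute-platform Lambda \
  --traffic-routing-config '{"type":"TimeBasedCanary","timeBasedCanary":{"canaryPercentage":15,"canaryInterval":10}}'
```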
### Supported By **CanaryDeployment** - AWS Lambda - ECS ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/codePipeline.md ================================================ ### CodePipeline [Grant approval permissions to an IAM user in CodePipeline](https://docs.aws.amazon.com/codepipeline/latest/userguide/approvals-iam-permissions.html) - attaching the AWSCodePipelineApproverAccess managed policy to an IAM user [Approve or reject an approval action in CodePipeline](https://docs.aws.amazon.com/codepipeline/latest/userguide/approvals-approve-or-reject.html) [Invoke an AWS Lambda function in a pipeline in CodePipeline](https://docs.aws.amazon.com/codepipeline/latest/userguide/actions-invoke-lambda-function.html) - Do not log the JSON event that CodePipeline sends to Lambda because this can result in user credentials being logged in CloudWatch Logs. The CodePipeline role uses a JSON event to pass temporary credentials to Lambda in the artifactCredentials field. [CodePipeline pipeline structure reference](https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html) - To specify parallel actions, use the same integer for each action you want to run in parallel. In the console, you can specify a serial sequence for an action by choosing Add action group at the level in the stage where you want it to run, or you can specify a parallel sequence by choosing Add action. Action group refers to a run order of one or more actions at the same level - different action groups have different runOrder values and their actions do not run in parallel. [Configure server-side encryption for artifacts stored in Amazon S3 for CodePipeline](https://docs.aws.amazon.com/codepipeline/latest/userguide/S3-artifact-encryption.html) ```json { "Version": "2012-10-17", "Id": "SSEAndSSLPolicy", "Statement": [ { "Sid": "DenyUnEncryptedObjectUploads", "Effect": "Deny", "Principal": "*", "Action": "s3:PutObject", "Resource": "arn:aws:s3:::codepipeline-us-west-2-89050EXAMPLE/*", "Condition": { "StringNotEquals": { "s3:x-amz-server-side-encryption": "aws:kms" } } }, { "Sid": "DenyInsecureConnections", "Effect": "Deny", "Principal": "*", "Action": "s3:*", "Resource": "arn:aws:s3:::codepipeline-us-west-2-89050EXAMPLE/*", "Condition": { "Bool": { "aws:SecureTransport": "false" } } } ] } ``` [FAQ](https://aws.amazon.com/codepipeline/faqs/) - Pipeline actions occur in a specified order, in serial or in parallel, as determined in the configuration of the stage ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/cognito.md ================================================ > Revision Count: 0 # AWS Cognito [What Is Amazon Cognito?](https://docs.aws.amazon.com/cognito/latest/developerguide/what-is-amazon-cognito.html) [Cheat Sheet - Cognito](https://tutorialsdojo.com/amazon-cognito) [Cheat Sheet - Amazon Cognito User and Identity Pools Explained](https://tutorialsdojo.com/amazon-cognito-user-pools-and-identity-pools-explained) - Amazon Cognito provides authentication, authorization, and user management for your web and mobile apps. - Your users can sign in directly with a user name and password, or through a third party such as Facebook, Amazon, Google or Apple. - The two main components of Amazon Cognito are user pools and identity pools. 
- User pools are user directories that provide sign-up and sign-in options for your app users.
- Identity pools enable you to grant your users access to other AWS services.
- You can use identity pools and user pools separately or together.

## Amazon Cognito Identity Pools (Federated Identities)

[Amazon Cognito Identity Pools (Federated Identities)](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html)

- Amazon Cognito identity pools (federated identities) enable you to create unique identities for your users and federate them with identity providers.
- With an identity pool, you can obtain temporary, limited-privilege AWS credentials to access other AWS services.

### Identity Pools (Federated Identities) External Identity Providers

[Identity Pools (Federated Identities) External Identity Providers](https://docs.aws.amazon.com/cognito/latest/developerguide/external-identity-providers.html)

- Using the logins property, you can set credentials received from an identity provider.
- Moreover, you can associate an identity pool with multiple identity providers.
- For example, you could set both the Facebook and Google tokens in the logins property, so that the unique Amazon Cognito identity would be associated with both identity provider logins.
- No matter which account the end user uses for authentication, Amazon Cognito returns the same user identifier.

#### Open ID Connect Providers (Identity Pools)

[Open ID Connect Providers (Identity Pools)](https://docs.aws.amazon.com/cognito/latest/developerguide/open-id.html)

- OpenID Connect is an open standard for authentication that is supported by a number of login providers.
- Amazon Cognito supports linking of identities with OpenID Connect providers that are configured through AWS Identity and Access Management.

## Integrating Amazon Cognito with web and mobile apps

[Integrating Amazon Cognito with web and mobile apps](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-integrate-apps.html)

[simplifying-token-vending-machine-deployment-with-aws-cloudformation](https://aws.amazon.com/blogs/mobile/simplifying-token-vending-machine-deployment-with-aws-cloudformation)

- By integrating Amazon Cognito with your client code, you connect your app to backend AWS functionality that aids authentication and authorization workflows.
- Your app will use the Amazon Cognito API to, for example, create new users in your user pool, retrieve user pool tokens, and obtain temporary credentials from your identity pool.
- To integrate Amazon Cognito with your web or mobile app, use the SDKs and libraries that the AWS Amplify framework provides.

### Amazon Cognito user pools

[Amazon Cognito user pools](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html)

- A user pool is a user directory in Amazon Cognito.
- With a user pool, your users can sign in to your web or mobile app through Amazon Cognito.
- Your users can also sign in through social identity providers like Google, Facebook, Amazon, or Apple, and through SAML identity providers.
- Whether your users sign in directly or through a third party, all members of the user pool have a directory profile that you can access through a Software Development Kit (SDK).
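As a minimal sketch of standing up a user pool and an app client for sign-in (the pool and client names are hypothetical, and the pool ID is a placeholder):

```bash
# Create a user directory for the app.
aws cognito-idp create-user-pool --pool-name MyUserPool

# Create an app client that the web or mobile app uses to call the user pool.
aws cognito-idp create-user-pool-client \
  --user-pool-id us-east-1_EXAMPLE \
  --client-name MyWebApp
```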
## SSO FAQs [Single Sign On FAQs](https://aws.amazon.com/single-sign-on/faqs) ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/commandLineInterface.md ================================================ # Command Line Interface ## Configuration AWS CLI ### Using an HTTP proxy [Using an HTTP proxy](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-proxy.html) - To access AWS through proxy servers, you can configure the HTTP_PROXY and HTTPS_PROXY environment variables with either the DNS domain names or IP addresses and port numbers that your proxy servers use. ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/config.md ================================================ # Config [What Is AWS Config?](https://docs.aws.amazon.com/config/latest/developerguide/WhatIsConfig.html) - AWS Config provides a detailed view of the configuration of AWS resources in your AWS account. This includes how the resources are related to one another and how they were configured in the past so that you can see how the configurations and relationships change over time [Cheat Sheet - AWS Config](https://tutorialsdojo.com/aws-config) - AWS Config provides a detailed view of the configuration of AWS resources in your AWS account. - This includes how the resources are related to one another and how they were configured in the past so that you can see how the configurations and relationships change over time. - An AWS resource is an entity you can work with in AWS, such as an Amazon Elastic Compute Cloud (EC2) instance, an Amazon Elastic Block Store (EBS) volume, a security group, or an Amazon Virtual Private Cloud (VPC). With AWS Config, you can do the following: - Evaluate your AWS resource configurations for desired settings. - Get a snapshot of the current configurations of the supported resources that are associated with your AWS account. - Retrieve configurations of one or more resources that exist in your account. - Retrieve historical configurations of one or more resources. - Receive a notification whenever a resource is created, modified, or deleted. - View relationships between resources. For example, you might want to find all resources that use a particular security group. [AWS Config](https://aws.amazon.com/config/) - AWS Config is a service that enables you to assess, audit, and evaluate the configurations of your AWS resources. - Config continuously monitors and records your AWS resource configurations and allows you to automate the evaluation of recorded configurations against desired configurations. - With Config, you can review changes in configurations and relationships between AWS resources, dive into detailed resource configuration histories, and determine your overall compliance against the configurations specified in your internal guidelines. ## AWS Config Managed Rules [AWS Config Managed Rules](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html) - AWS Config provides AWS managed rules, which are predefined, customizable rules that AWS Config uses to evaluate whether your AWS resources comply with common best practices. 
- For example, you could use a managed rule to quickly start assessing whether your Amazon Elastic Block Store (Amazon EBS) volumes are encrypted or whether specific tags are applied to your resources - You can set up and activate these rules without writing the code to create an AWS Lambda function, which is required if you want to create custom rules. - The AWS Config console guides you through the process of configuring and activating a managed rule. The evaluation triggers are defined as part of the rule, and they can include the following types: - Configuration changes - AWS Config triggers the evaluation when any resource that matches the rule's scope changes in configuration. The evaluation runs after AWS Config sends a configuration item change notification. - Periodic - AWS Config runs evaluations for the rule at a frequency that you choose (for example, every 24 hours). ### Managed Rules [List of AWS Config Managed Rules](https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html) - [approved-amis-by-id](https://docs.aws.amazon.com/config/latest/developerguide/approved-amis-by-id.html) - Checks if running instances are using specified AMIs. Specify a list of approved AMI IDs. Running instances with AMIs that are not on this list are NON_COMPLIANT. --- ### Viewing Compliance History Timeline for Resources [Viewing Compliance History Timeline for Resources](https://docs.aws.amazon.com/config/latest/developerguide/view-compliance-history.html) - AWS Config supports storing compliance state changes of resources as evaluated by AWS Config Rules. The resource compliance history is presented in the form of a timeline. The timeline captures changes as ConfigurationItems over a period of time for a specific resource. ### AWS Config Rules #### Specifying Triggers [Specifying Triggers for AWS Config Rules](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config-rules.html) - When you add a rule to your account, you can specify when you want AWS Config to run the rule; this is called a trigger. AWS Config evaluates your resource configurations against the rule when the trigger occurs. - There are two types of triggers: - Configuration changes - Periodic #### Managing Rules ##### restricted-ssh [restricted-ssh](https://docs.aws.amazon.com/config/latest/developerguide/restricted-ssh.html) - Checks if the incoming SSH traffic for the security groups is accessible. The rule is COMPLIANT when IP addresses of the incoming SSH traffic in the security groups are restricted (CIDR other than 0.0.0.0/0). This rule applies only to IPv4. #### Remediating Noncompliant AWS Resources by AWS Config Rules [Remediating Noncompliant AWS Resources by AWS Config Rules](https://docs.aws.amazon.com/config/latest/developerguide/remediation.html) - AWS Config allows you to remediate noncompliant resources that are evaluated by AWS Config Rules. AWS Config applies remediation using AWS Systems Manager Automation documents. These documents define the actions to be performed on noncompliant AWS resources evaluated by AWS Config Rules. 
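A minimal sketch of activating the restricted-ssh managed rule described above from the CLI (INCOMING_SSH_DISABLED is its managed-rule identifier):

```bash
# Activate the AWS managed rule that flags security groups allowing
# unrestricted (0.0.0.0/0) inbound SSH.
aws configservice put-config-rule \
  --config-rule '{"ConfigRuleName":"restricted-ssh","Source":{"Owner":"AWS","SourceIdentifier":"INCOMING_SSH_DISABLED"}}'
```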
================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/connect.md ================================================ # Connect [Amazon Connect](https://aws.amazon.com/connect) - Provide superior customer service at a lower cost with an easy-to-use omnichannel cloud contact center ## Blogs [Easily set up interactive messages for your Amazon Connect chatbot](https://aws.amazon.com/blogs/contact-center/easily-set-up-interactive-messages-for-your-amazon-connect-chatbot) ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/dataMigrationService.md ================================================ # Data Migration Service [AWS Data Migration Service](https://aws.amazon.com/dms) [Cheatsheet - AWS Data Migration Service](https://tutorialsdojo.com/aws-database-migration-service) - AWS Database Migration Service (AWS DMS) helps you migrate databases to AWS quickly and securely. - The source database remains fully operational during the migration, minimizing downtime to applications that rely on the database. - The AWS Database Migration Service can migrate your data to and from the most widely used commercial and open-source databases. - AWS Database Migration Service supports homogeneous migrations such as Oracle to Oracle, as well as heterogeneous migrations between different database platforms, such as Oracle or Microsoft SQL Server to Amazon Aurora ## How AWS Database Migration Service works [How AWS Database Migration Service works](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Introduction.html) - AWS Database Migration Service (AWS DMS) is a web service that you can use to migrate data from a source data store to a target data store. - These two data stores are called endpoints. - You can migrate between source and target endpoints that use the same database engine, such as from an Oracle database to an Oracle database. - You can also migrate between source and target endpoints that use different database engines, such as from an Oracle database to a PostgreSQL database. - The only requirement to use AWS DMS is that one of your endpoints must be on an AWS service. ================================================ FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/developerToolsConsole.md ================================================ ### DeveloperToolsConsole [Security for features of the Developer Tools console](https://docs.aws.amazon.com/dtconsole/latest/userguide/security.html#security-notifications) - You can choose to limit the details included in notifications to only what is included in an event. This is referred to as the Basic detail type. These events contain exactly the same information as is sent to Amazon EventBridge and Amazon CloudWatch Events. [Create a notification rule for a repository](https://docs.aws.amazon.com/dtconsole/latest/userguide/getting-started-repository.html) - You can create notification rules to send notifications about repository events that are important to you. 
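A rough sketch of creating such a notification rule from the CLI (the names, ARNs, and event-type ID below are illustrative assumptions to check against the docs):

```bash
# Send Basic-detail notifications to an SNS topic when pull requests are
# created in the repository.
aws codestar-notifications create-notification-rule \
  --name MyRepoNotifications \
  --resource arn:aws:codecommit:us-east-1:111111111111:MyDemoRepo \
  --detail-type BASIC \
  --event-type-ids codecommit-repository-pull-request-created \
  --targets TargetType=SNS,TargetAddress=arn:aws:sns:us-east-1:111111111111:my-topic
```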
================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/directConnect.md
================================================

# Direct Connect

[Cheat Sheet - Direct Connect](https://tutorialsdojo.com/aws-direct-connect)

[AWS Whitepaper - AWS Direct Connect](https://docs.aws.amazon.com/whitepapers/latest/aws-vpc-connectivity-options/aws-direct-connect.html)

[How can I configure VPN as a backup for my AWS Direct Connect connection?](https://aws.amazon.com/premiumsupport/knowledge-center/configure-vpn-backup-dx)

- AWS Direct Connect makes it easy to establish a dedicated connection from an on-premises network to one or more VPCs in the same region.

### Working with Direct Connect gateways

[Working with Direct Connect gateways](https://docs.aws.amazon.com/directconnect/latest/UserGuide/direct-connect-gateways.html)

## Blogs

[Which type of virtual interface should I use to connect different resources in AWS?](https://aws.amazon.com/premiumsupport/knowledge-center/public-private-interface-dx)

- AWS Direct Connect (DX) provides three types of virtual interfaces: public, private, and transit. How do I determine which type I should use to connect different resources (public or private) in AWS?
- To connect to AWS resources that are reachable by a public IP address (such as an Amazon Simple Storage Service bucket) or AWS public endpoints, use a public virtual interface.
- To connect to your resources hosted in an Amazon Virtual Private Cloud (Amazon VPC) using their private IP addresses, use a private virtual interface.
- To connect to your resources hosted in an Amazon VPC (using their private IP addresses) through a transit gateway, use a transit virtual interface.

[New – AWS Direct Connect Gateway – Inter-Region VPC Access](https://aws.amazon.com/blogs/aws/new-aws-direct-connect-gateway-inter-region-vpc-access/)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/dynamodb.md
================================================

> Revision Count: 0

# DynamoDB

[Cheat Sheet - DynamoDB](https://tutorialsdojo.com/amazon-dynamodb)

- Fast
- Highly scalable
- Highly available
- Cost-effective
- Non-relational database service

[Introduction](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Introduction.html)

What Is Amazon DynamoDB? Amazon DynamoDB is a fully managed NoSQL database service that provides fast and predictable performance with seamless scalability. DynamoDB lets you offload the administrative burdens of operating and scaling a distributed database so that you don't have to worry about hardware provisioning, setup and configuration, replication, software patching, or cluster scaling. DynamoDB also offers encryption at rest, which eliminates the operational burden and complexity involved in protecting sensitive data.

[Best Practices for Storing Large Items and Attributes](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-use-s3-too.html)

- Amazon DynamoDB currently limits the size of each item that you store in a table (see Service, Account, and Table Quotas in Amazon DynamoDB). If your application needs to store more data in an item than the DynamoDB size limit permits, you can try compressing one or more large attributes or breaking the item into multiple items (efficiently indexed by sort keys).
- You can also store the item as an object in Amazon Simple Storage Service (Amazon S3) and store the Amazon S3 object identifier in your DynamoDB item.

**High Availability and Durability**

- DynamoDB automatically spreads the data and traffic for your tables over a sufficient number of servers to handle your throughput and storage requirements, while maintaining consistent and fast performance. All of your data is stored on solid-state disks (SSDs) and is automatically replicated across multiple Availability Zones in an AWS Region, providing built-in high availability and data durability.
- You can use global tables to keep DynamoDB tables in sync across AWS Regions.

[Dynamodb best practices](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/best-practices.html)

### Global Tables

[Global Tables: Multi-Region Replication with DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html)

- Amazon DynamoDB global tables provide a fully managed solution for deploying a multiregion, multi-active database, without having to build and maintain your own replication solution.
- With global tables you can specify the AWS Regions where you want the table to be available.
- DynamoDB performs all of the necessary tasks to create identical tables in these Regions and propagate ongoing data changes to all of them.

[Amazon DynamoDB global tables](https://aws.amazon.com/dynamodb/global-tables/)

## Security

### Identity and Access Management in Amazon DynamoDB

[Identity and Access Management in Amazon DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/authentication-and-access-control.html)

- Access to Amazon DynamoDB requires credentials. Those credentials must have permissions to access AWS resources, such as an Amazon DynamoDB table or an Amazon Elastic Compute Cloud (Amazon EC2) instance.
  - Authentication
  - Access Control

## Error Handling with DynamoDB

[Error Handling with DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html)

## Note

- RDS MySQL is not as scalable and cost-effective as DynamoDB.
- It is not recommended to store authorization tokens permanently in DynamoDB tables. These tokens should be generated upon user authentication and then temporarily saved in a DynamoDB table for a fixed session length.

## Blogs

[New – Auto Scaling for Amazon DynamoDB](https://aws.amazon.com/blogs/aws/new-auto-scaling-for-amazon-dynamodb)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/ec2.md
================================================

# EC2

[Cheat Sheet - EBS](https://tutorialsdojo.com/amazon-ebs)

[EC2](https://tutorialsdojo.com/amazon-elastic-compute-cloud-amazon-ec2)

[dedicated-hosts](https://aws.amazon.com/ec2/dedicated-hosts)

- Network Load Balancers do not use security groups.

[iam-roles-for-amazon-ec2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)

- The AWS SDKs assume the IAM roles attached to the instances and get temporary credentials by interacting with the AWS STS service.
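As a quick illustration of where those temporary credentials come from, you can query the instance metadata service from inside the instance (the role name path segment is whatever role is attached to the instance; IMDSv2 would additionally require a session token):

```bash
# List the role attached to this instance...
curl http://169.254.169.254/latest/meta-data/iam/security-credentials/

# ...then fetch its temporary AccessKeyId/SecretAccessKey/Token, which the
# AWS SDKs pick up and refresh automatically.
curl http://169.254.169.254/latest/meta-data/iam/security-credentials/my-instance-role
```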
[security-group-load-balancer](https://aws.amazon.com/premiumsupport/knowledge-center/security-group-load-balancer/)

[How do I stop and start Amazon EC2 instances at regular intervals using Lambda?](https://aws.amazon.com/premiumsupport/knowledge-center/start-stop-lambda-cloudwatch)

To stop and start EC2 instances at regular intervals using Lambda, do the following:

- Create a custom AWS Identity and Access Management (IAM) policy and execution role for your Lambda function.
- Create Lambda functions that stop and start your EC2 instances.
- Test your Lambda functions.
- Create CloudWatch Events rules that trigger your function on a schedule.

This example setup is a simple solution. For a more robust solution, use the AWS Instance Scheduler

## EC2 Spot

### Getting Started with Amazon EC2 Spot Instances

[Getting Started with Amazon EC2 Spot Instances](https://aws.amazon.com/ec2/spot/getting-started/)

#### No Spot capacity available

[Why am I receiving a "no Spot capacity available" error when trying to launch an Amazon EC2 Spot Instance?](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-spot-instance-insufficient-capacity/)

- Be flexible about which instance types you request and which Availability Zones you deploy your workload in

[Spot Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html)

### Elastic network interfaces

[Elastic network interfaces](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html)

- An elastic network interface is a logical networking component in a VPC that represents a virtual network card.

### Multiple IP addresses

[Multiple IP addresses](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/MultipleIP.html)

- You can specify multiple private IPv4 and IPv6 addresses for your instances.
- The number of network interfaces and private IPv4 and IPv6 addresses that you can specify for an instance depends on the instance type.

## Fleets

### Example 5: Launch a Spot Fleet using the diversified allocation strategy

[Example 5: Launch a Spot Fleet using the diversified allocation strategy](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-examples.html#fleet-config5)

- A best practice that increases the chance of a Spot request being fulfilled by EC2 capacity during an outage in one Availability Zone is to diversify across zones.
- For this scenario, include each Availability Zone available to you in the launch specification. And, instead of using the same subnet each time, use three unique subnets (each mapping to a different zone).

### Dedicated Instances

[Dedicated Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-instance.html)

- Dedicated Instances are Amazon EC2 instances that run in a virtual private cloud (VPC) on hardware that's dedicated to a single customer.

> Set the tenancy type to `dedicated` (or `host` for Dedicated Hosts).

## Storage

### Amazon EBS volume types

[Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html)

- Solid state drives (SSD) — Optimized for transactional workloads involving frequent read/write operations with small I/O size, where the dominant performance attribute is IOPS.
- Hard disk drives (HDD) — Optimized for large streaming workloads where the dominant performance attribute is throughput.
- Previous generation — Hard disk drives that can be used for workloads with small datasets where data is accessed infrequently and performance is not of primary importance

There are several factors that can affect the performance of EBS volumes, such as instance configuration, I/O characteristics, and workload demand

[Comparison of various block storage types](https://aws.amazon.com/ebs/features)
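For example, creating a General Purpose SSD volume from the CLI might look like this (size, AZ, IOPS, and throughput values are illustrative; gp3 lets you provision IOPS and throughput independently of size):

```bash
aws ec2 create-volume \
    --volume-type gp3 \
    --size 100 \
    --iops 3000 \
    --throughput 125 \
    --availability-zone us-east-1a
```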
## Instances

### Reserved Instances

[Reserved Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-reserved-instances.html)

- Reserved Instances provide you with significant savings on your Amazon EC2 costs compared to On-Demand Instance pricing.
- Reserved Instances are not physical instances, but rather a billing discount applied to the use of On-Demand Instances in your account.

## Security

#### Supported resource-level permissions for Amazon EC2 API actions

[Supported resource-level permissions for Amazon EC2 API actions](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-policy-structure.html#ec2-supported-iam-actions-resources)

- Resource-level permissions refers to the ability to specify which resources users are allowed to perform actions on.
- Amazon EC2 has partial support for resource-level permissions.
- This means that for certain Amazon EC2 actions, you can control when users are allowed to use those actions based on conditions that have to be fulfilled, or specific resources that users are allowed to use.
- For example, you can grant users permissions to launch instances, but only of a specific type, and only using a specific AMI.

## VM Import/Export

[VM Import/Export](https://aws.amazon.com/ec2/vm-import)

- VM Import/Export enables you to easily import virtual machine images from your existing environment to Amazon EC2 instances and export them back to your on-premises environment

## Dynamic Scaling

### Scaling based on Amazon SQS

[Scaling based on Amazon SQS](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-using-sqs-queue.html)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/ec2AutoScaling.md
================================================

# EC2 Auto Scaling

[Scheduled scaling for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/schedule_time.html)

- the cooldown timer does not influence scheduled activity

[Cooldown](https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html)

- even if the cooldown timer is running, a scheduled action takes higher priority and executes immediately

[Amazon EC2 Auto Scaling lifecycle hooks](https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html)

- When a scale-out event occurs, your newly launched instance completes its startup sequence and transitions to a wait state.
- While the instance is in a wait state, it runs a script to download and install the needed software packages for your application, making sure that your instance is fully ready before it starts receiving traffic.
- When the script is finished installing software, it sends the complete-lifecycle-action command to continue, as shown below.
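A sketch of that final step, as it might appear at the end of the instance's bootstrap script (hook, group, and instance IDs are illustrative):

```bash
aws autoscaling complete-lifecycle-action \
    --lifecycle-hook-name my-launch-hook \
    --auto-scaling-group-name my-asg \
    --lifecycle-action-result CONTINUE \
    --instance-id i-0123456789abcdef0
```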
[AutoScalingReplacingUpdate policy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html)

To specify how AWS CloudFormation handles replacement updates for an Auto Scaling group, use the AutoScalingReplacingUpdate policy. This policy enables you to specify whether AWS CloudFormation replaces an Auto Scaling group with a new one or replaces only the instances in the Auto Scaling group.

### Supported deployment methods

- AllAtOnce
- HalfAtATime
- OneAtATime

It does not support the canary deployment type.

### Auto Scaling groups with multiple instance types and purchase options

- You can launch and automatically scale a fleet of On-Demand Instances and Spot Instances within a single Auto Scaling group. In addition to receiving discounts for using Spot Instances, you can use Reserved Instances or a Savings Plan to receive discounted rates of the regular On-Demand Instance pricing.

## Auto Scaling Groups

[What is Amazon EC2 Auto Scaling?](https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html)

- Amazon EC2 Auto Scaling helps you ensure that you have the correct number of Amazon EC2 instances available to handle the load for your application.
- You create collections of EC2 instances, called Auto Scaling groups
- You can specify the minimum number of instances in each Auto Scaling group, and Amazon EC2 Auto Scaling ensures that your group never goes below this size.
- You can specify the maximum number of instances in each Auto Scaling group, and Amazon EC2 Auto Scaling ensures that your group never goes above this size.

### Elastic Load Balancing and Amazon EC2 Auto Scaling

[Elastic Load Balancing and Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html)

- Elastic Load Balancing automatically distributes your incoming application traffic across all the EC2 instances that you are running.
- Elastic Load Balancing helps to manage incoming requests by optimally routing traffic so that no one instance is overwhelmed.
- To use Elastic Load Balancing with your Auto Scaling group, attach the load balancer to your Auto Scaling group.
- This registers the group with the load balancer, which acts as a single point of contact for all incoming web traffic to your Auto Scaling group.

## Elastic Load Balancing

### Attaching a load balancer to your Auto Scaling group

[Attaching a load balancer to your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/attach-load-balancer-asg.html)

- Amazon EC2 Auto Scaling integrates with Elastic Load Balancing to help you to insert an Application Load Balancer, Network Load Balancer, Classic Load Balancer, or Gateway Load Balancer in front of your Auto Scaling group.
- When you attach an Application Load Balancer, Network Load Balancer, or Gateway Load Balancer, you attach a target group.
- Amazon EC2 Auto Scaling adds instances to the attached target group when they are launched.
- You can attach one or multiple target groups, and configure health checks on a per target group basis.

### Scaling based on Amazon SQS

[Scaling based on Amazon SQS](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-using-sqs-queue.html)

- There are some scenarios where you might think about scaling in response to activity in an Amazon SQS queue.
- For example, suppose that you have a web app that lets users upload images and use them online. A sketch of this pattern follows below.
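One common implementation is to publish the queue backlog as a custom CloudWatch metric and scale on it; a minimal sketch (queue URL, namespace, and metric name are illustrative):

```bash
# Approximate number of visible messages waiting in the queue
BACKLOG=$(aws sqs get-queue-attributes \
    --queue-url https://sqs.us-east-1.amazonaws.com/111111111111/my-queue \
    --attribute-names ApproximateNumberOfMessages \
    --query 'Attributes.ApproximateNumberOfMessages' --output text)

# Publish it as a custom metric that a scaling policy can track
aws cloudwatch put-metric-data \
    --namespace MyApp \
    --metric-name QueueBacklog \
    --value "$BACKLOG"
```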
### Temporarily removing instances from your Auto Scaling group

[Temporarily removing instances from your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-enter-exit-standby.html)

- You can put an instance that is in the InService state into the Standby state, update or troubleshoot the instance, and then return the instance to service. Instances that are on standby are still part of the Auto Scaling group, but they do not actively handle load balancer traffic.
- Amazon EC2 Auto Scaling does not perform health checks on instances that are in a standby state.

```bash
aws autoscaling enter-standby --instance-ids i-05b4f7d5be44822a6 \
    --auto-scaling-group-name my-asg --should-decrement-desired-capacity
```

### Monitoring

#### Health checks for Auto Scaling instances

[Health checks for Auto Scaling instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html)

The health status of an Auto Scaling instance is either healthy or unhealthy. All instances in your Auto Scaling group start in the healthy state. Instances are assumed to be healthy unless Amazon EC2 Auto Scaling receives notification that they are unhealthy. This notification can come from one or more of the following sources: Amazon EC2, Elastic Load Balancing (ELB), or a custom health check.

**Instance health status**

Amazon EC2 Auto Scaling can determine the health status of an instance using one or more of the following:

- Status checks provided by Amazon EC2 to identify hardware and software issues that may impair an instance. The default health checks for an Auto Scaling group are EC2 status checks only.
- Health checks provided by Elastic Load Balancing (ELB). These health checks are disabled by default but can be enabled.
- Your custom health checks.

Using custom health checks

```bash
aws autoscaling set-instance-health --instance-id i-123abc45d --health-status Unhealthy
```

Health check grace period

- By default, the health check grace period is 300 seconds when you create an Auto Scaling group from the AWS Management Console. Its default value is 0 seconds when you create an Auto Scaling group using the AWS CLI or an SDK.
- If you add a lifecycle hook, the grace period does not start until the lifecycle hook actions are completed and the instance enters the InService state.
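Both the health check type and the grace period can be set on an existing group; a sketch with illustrative names and values:

```bash
aws autoscaling update-auto-scaling-group \
    --auto-scaling-group-name my-asg \
    --health-check-type ELB \
    --health-check-grace-period 300
```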
================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/ecs.md
================================================

# ECS

[Cheat Sheet - ECS](https://tutorialsdojo.com/amazon-elastic-container-service-amazon-ecs/)

[Cheat Sheet - Elastic Container Service (ECS) vs Lambda](https://tutorialsdojo.com/ec2-container-service-ecs-vs-lambda)

[Cheat Sheet - AWS Fargate](https://tutorialsdojo.com/aws-fargate)

[What is Amazon Elastic Container Service?](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/Welcome.html)

- highly scalable, fast container management service that makes it easy to run, stop, and manage containers on a cluster
- Your containers are defined in a task definition that you use to run individual tasks or tasks within a service
- In this context, a service is a configuration that enables you to run and maintain a specified number of tasks simultaneously in a cluster.
- You can run your tasks and services on a serverless infrastructure that is managed by AWS Fargate. Alternatively, for more control over your infrastructure, you can run your tasks and services on a cluster of Amazon EC2 instances that you manage.

[What is ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/Welcome.html)

[Target tracking scaling policies](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-autoscaling-targettracking.html)

[How do I troubleshoot Amazon ECS tasks that take a long time to stop when the container instance is set to DRAINING?](https://aws.amazon.com/premiumsupport/knowledge-center/ecs-tasks-stop-delayed-draining/)

[service_definition_parameters](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service_definition_parameters.html)

- If a service is using the rolling update (ECS) deployment type, the minimumHealthyPercent represents a lower limit on the number of your service's tasks that must remain in the RUNNING state during a deployment
- Minimum healthy percent represents a lower limit on the tasks. When the parameter is set to 100, the number of the service's running tasks would be equal to or more than the desired count of tasks during a rolling update.

[Blue/Green deployment with CodeDeploy](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-bluegreen.html)

- You must configure the service to use either an Application Load Balancer or Network Load Balancer. Classic Load Balancers aren't supported

> The Fargate launch type is not required for the blue/green deployment type. The EC2 launch type is also supported.

- When you initially create a CodeDeploy application and deployment group, you must specify the following: you must define two target groups for the load balancer

## Using Spot Instances

- A Spot Instance is an unused Amazon EC2 instance that is available for less than the On-Demand price.
- The hourly price for a Spot Instance is called a Spot price

### Spot Instance Draining

- Amazon EC2 terminates, stops, or hibernates your Spot Instance when the Spot price exceeds the maximum price for your request or capacity is no longer available
- Amazon EC2 provides a Spot Instance interruption notice, which gives the instance a two-minute warning before it is interrupted.
- If Amazon ECS Spot Instance draining is enabled on the instance, ECS receives the Spot Instance interruption notice and places the instance in DRAINING status.
- When a container instance is set to DRAINING, Amazon ECS prevents new tasks from being scheduled for placement on the container instance.
- Service tasks on the draining container instance that are in the PENDING state are stopped immediately
- If there are container instances in the cluster that are available, replacement service tasks are started on them.

```bash
# To enable Spot Instance draining for an existing container instance
# Edit the /etc/ecs/ecs.config file and add the following:
ECS_ENABLE_SPOT_INSTANCE_DRAINING=true
```

[Four Steps to Run ECS Clusters on EC2 Spot Instances](https://aws.amazon.com/ec2/spot/containers-for-less/get-started/)

## Task Definitions

Amazon ECS enables you to inject sensitive data into your containers by storing your sensitive data in either AWS Secrets Manager secrets or AWS Systems Manager Parameter Store parameters and then referencing them in your container definition.

- Store the database credentials using the AWS Secrets Manager
- encrypt them using AWS KMS
- Create an IAM Role for your Amazon ECS task execution role
- and reference it with your task definition, which allows access to both KMS and AWS Secrets Manager
- Within your container definition, specify secrets with the name of the environment variable to set in the container and the full ARN of the Secrets Manager secret which contains the sensitive data, to present to the container.

> Systems Manager Parameter Store service doesn't provide dedicated storage with lifecycle management and key rotation, unlike Secrets Manager.
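A minimal sketch of registering a task definition whose container pulls a secret from Secrets Manager; the family, image, role, and secret ARNs are all illustrative:

```bash
# The execution role must be allowed to read the secret (and the KMS key that encrypts it)
aws ecs register-task-definition \
    --family my-app \
    --execution-role-arn arn:aws:iam::111111111111:role/ecsTaskExecutionRole \
    --container-definitions '[{
        "name": "app",
        "image": "111111111111.dkr.ecr.us-east-1.amazonaws.com/my-app:latest",
        "memory": 512,
        "secrets": [{
            "name": "DB_PASSWORD",
            "valueFrom": "arn:aws:secretsmanager:us-east-1:111111111111:secret:prod/db"
        }]
    }]'
```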
### Amazon ECS task networking

[Amazon ECS task networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html)

- The networking behavior of Amazon ECS tasks hosted on Amazon EC2 instances is dependent on the network mode defined in the task definition. The following are the available network modes. Amazon ECS recommends using the awsvpc network mode unless you have a specific need to use a different network mode.
  - awsvpc — The task is allocated its own elastic network interface (ENI) and a primary private IPv4 address. This gives the task the same networking properties as Amazon EC2 instances.
  - bridge — The task utilizes Docker's built-in virtual network which runs inside each Amazon EC2 instance hosting the task.
  - host — The task bypasses Docker's built-in virtual network and maps container ports directly to the ENI of the Amazon EC2 instance hosting the task. As a result, you can't run multiple instantiations of the same task on a single Amazon EC2 instance when port mappings are used.
  - none — The task has no external network connectivity.
- In order for you to use security groups and network monitoring tools at a more granular level within your ECS tasks, you have to use the awsvpc network mode

## Troubleshooting

### CannotPullContainer task errors

[CannotPullContainer task errors](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_cannot_pull_image.html)

[How can I resolve the Amazon ECR error "CannotPullContainerError: API error" in Amazon ECS?](https://aws.amazon.com/premiumsupport/knowledge-center/ecs-pull-container-api-error-ecr)

- One reason: a route to the internet doesn't exist

## Amazon ECS on AWS Fargate

[Amazon ECS on AWS Fargate](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html)

- AWS Fargate is a technology that you can use with Amazon ECS to run containers without having to manage servers or clusters of Amazon EC2 instances.
- With AWS Fargate, you no longer have to provision, configure, or scale clusters of virtual machines to run containers

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/elasticBeanStalk.md
================================================

# Elastic Beanstalk

[using-features.rolling-version-deploy](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/using-features.rolling-version-deploy.html)

- AWS Elastic Beanstalk provides several options for how deployments are processed, including deployment policies (All at once, Rolling, Rolling with additional batch, Immutable, and Traffic splitting)
- If you use the blue/green deployment strategy, then two environments are required.

[Blue/Green deployments with Elastic Beanstalk](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/using-features.CNAMESwap.html)
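The actual cutover in a blue/green deployment is a CNAME swap between the two environments; a sketch with illustrative environment names:

```bash
aws elasticbeanstalk swap-environment-cnames \
    --source-environment-name my-app-blue \
    --destination-environment-name my-app-green
```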
================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/elasticCache.md
================================================

# ElastiCache

[Cheat Sheet - ElastiCache](https://tutorialsdojo.com/amazon-elasticache)

## What is Amazon ElastiCache for Memcached?

[What is Amazon ElastiCache for Memcached?](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/WhatIs.html)

- Amazon ElastiCache is a web service that makes it easy to set up, manage, and scale a distributed in-memory data store or cache environment in the cloud.
- It provides a high-performance, scalable, and cost-effective caching solution. At the same time, it helps remove the complexity associated with deploying and managing a distributed cache environment.

[ElastiCache - How it works](https://aws.amazon.com/elasticache/)

- fully managed, in-memory caching service supporting flexible, real-time use cases.
- You can use ElastiCache for caching, which accelerates application and database performance, or as a primary data store for use cases that don't require durability like session stores, gaming leaderboards, streaming, and analytics.
- ElastiCache is compatible with Redis and Memcached.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/elasticFileSystem.md
================================================

# Elastic File System

[Cheat Sheet - AWS Elastic File System](https://tutorialsdojo.com/amazon-efs)

[What is Amazon Elastic File System?](https://docs.aws.amazon.com/efs/latest/ug/whatisefs.html)

- Amazon Elastic File System (Amazon EFS) provides a simple, serverless, set-and-forget elastic file system for use with AWS Cloud services and on-premises resources
- It is built to scale on demand to petabytes without disrupting applications, growing and shrinking automatically as you add and remove files, eliminating the need to provision and manage capacity to accommodate growth
- Amazon EFS supports the Network File System version 4 (NFSv4.1 and NFSv4.0) protocol, so the applications and tools that you use today work seamlessly with Amazon EFS.
- Multiple compute instances, including Amazon EC2, Amazon ECS, and AWS Lambda, can access an Amazon EFS file system at the same time, providing a common data source for workloads and applications running on more than one compute instance or server.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/elasticLoadBalancing.md
================================================

# Elastic Load Balancing

[Cheat Sheet - ALB vs NLB vs CLB](https://tutorialsdojo.com/application-load-balancer-vs-network-load-balancer-vs-classic-load-balancer)

### Health checks for your target groups

[Health checks for your target groups](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-health-checks.html)

- Your Application Load Balancer periodically sends requests to its registered targets to test their status. These tests are called health checks.

> If a target group contains only unhealthy registered targets, the load balancer routes requests to all those targets, regardless of their health status.
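Health check behavior is tuned per target group; a sketch with an illustrative ARN and values:

```bash
aws elbv2 modify-target-group \
    --target-group-arn arn:aws:elasticloadbalancing:us-east-1:111111111111:targetgroup/my-tg/0123456789abcdef \
    --health-check-path /healthz \
    --health-check-interval-seconds 30 \
    --healthy-threshold-count 3 \
    --unhealthy-threshold-count 2
```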
## Network Load Balancer

[Troubleshoot your Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-troubleshooting.html)

### Target security groups

[Target security groups](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/target-group-register-targets.html#target-security-groups)

- When you register EC2 instances as targets, you must ensure that the security groups for these instances allow traffic on both the listener port and the health check port.

[Deregistration delay](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html#deregistration-delay)

## Notes

- Network Load Balancers don't have associated security groups.

## Blogs

- [How do I attach a security group to my Elastic Load Balancer?](https://aws.amazon.com/premiumsupport/knowledge-center/security-group-load-balancer)
- [AWS Elastic Load Balancing: Support for SSL Termination](https://aws.amazon.com/blogs/aws/elastic-load-balancer-support-for-ssl-termination)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/eventBridge.md
================================================

# EventBridge

### What is EventBridge

[What Is Amazon EventBridge?](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-what-is.html)

[Video - Intro to EventBridge](https://youtu.be/TXh5oU_yo9M)

Amazon EventBridge is a serverless event bus service that you can use to connect your applications with data from a variety of sources. EventBridge delivers a stream of real-time data from your applications, software as a service (SaaS) applications, and AWS services to targets such as AWS Lambda functions, HTTP invocation endpoints using API destinations, or event buses in other AWS accounts.

#### Getting started with Amazon EventBridge

[Getting started with Amazon EventBridge](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-get-started.html)

To create a rule for events, you specify an action to take when EventBridge receives an event that matches the event pattern in the rule. When an event matches, EventBridge sends the event to the specified target and triggers the action defined in the rule.

#### Event buses

[Amazon EventBridge event buses](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-bus.html)

An event bus is a pipeline that receives events. Rules associated with the event bus evaluate events as they arrive. Each rule checks whether an event matches the rule's criteria. You associate a rule with a specific event bus, so the rule only applies to events received by that event bus.
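A sketch of a rule on the default event bus with a Lambda target (rule name, pattern, and ARN are illustrative):

```bash
# Rule that matches EC2 instance state-change events
aws events put-rule \
    --name ec2-state-change \
    --event-pattern '{"source": ["aws.ec2"], "detail-type": ["EC2 Instance State-change Notification"]}'

# Route matched events to a Lambda function
aws events put-targets \
    --rule ec2-state-change \
    --targets 'Id=1,Arn=arn:aws:lambda:us-east-1:111111111111:function:my-handler'
```

Note that a Lambda target also needs a resource-based permission allowing events.amazonaws.com to invoke it.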
[Video - The following video describes what event buses are and explains some of the basics of them](https://youtu.be/LkEBBgWRKkI)

[The following video covers the different event buses and when to use them](https://youtu.be/cB5-GTSJNqc)

#### Receiving events from a SaaS partner

[Receiving events from a SaaS partner with Amazon EventBridge](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-saas.html)

[Video - The following video covers SaaS integrations with EventBridge](https://youtu.be/zxFrM6z8Wdg)

#### Targets

[Sending and receiving Amazon EventBridge events between AWS accounts](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-cross-account.html)

You can configure EventBridge to send and receive events between AWS accounts. When you configure EventBridge to send or receive events between accounts, you can specify which AWS accounts can send events to or receive events from the event bus in your account.

[Video - The following video covers routing events between accounts](https://youtu.be/pX_xIW_EuCE)

#### Decoupling larger applications with Amazon EventBridge

[Decoupling larger applications with Amazon EventBridge](https://aws.amazon.com/blogs/compute/decoupling-larger-applications-with-amazon-eventbridge/)

- you can use an event-based architecture to decouple services and functional areas of applications.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/guardDuty.md
================================================

# AWS GuardDuty

[What is Amazon GuardDuty?](https://docs.aws.amazon.com/guardduty/latest/ug/what-is-guardduty.html)

- Amazon GuardDuty is a continuous security monitoring service that analyzes and processes the following data sources: VPC Flow Logs, AWS CloudTrail management event logs, CloudTrail S3 data event logs, and DNS logs. It uses threat intelligence feeds, such as lists of malicious IP addresses and domains, and machine learning to identify unexpected and potentially unauthorized and malicious activity within your AWS environment. This can include issues like escalation of privileges, use of exposed credentials, or communication with malicious IP addresses or domains. For example, GuardDuty can detect compromised EC2 instances serving malware or mining bitcoin.

[How to get started with security response automation on AWS](https://aws.amazon.com/blogs/security/how-get-started-security-response-automation-aws/)

### Finding Types

#### EC2 finding types

- Recon:EC2/Portscan
  - Data source: VPC Flow Logs: This finding informs you that the listed EC2 instance in your AWS environment is engaged in a possible port scan attack because it is trying to connect to multiple ports over a short period of time.
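GuardDuty is enabled per account and Region by creating a detector; a minimal sketch:

```bash
# Enable GuardDuty in the current Region and capture the detector ID
DETECTOR_ID=$(aws guardduty create-detector --enable --query 'DetectorId' --output text)

# List findings (such as Recon:EC2/Portscan) for that detector
aws guardduty list-findings --detector-id "$DETECTOR_ID"
```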
================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/iam.md
================================================

> Revision Count: 1

# Identity and Access Management

[Cheat Sheet - Identity and Access Management](https://tutorialsdojo.com/aws-identity-and-access-management-iam)

[Cheat Sheet - SCP vs IAM](https://tutorialsdojo.com/service-control-policies-scp-vs-iam-policies)

[Cheat Sheet - security-identity-services](https://tutorialsdojo.com/aws-cheat-sheets-security-identity-services)

### Identity providers and federation

[Enabling SAML 2.0 federated users to access the AWS Management Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html)

- You can use a role to configure your SAML 2.0-compliant identity provider (IdP) and AWS to permit your federated users to access the AWS Management Console.
- The role grants the user permissions to carry out tasks in the console.

### Enabling custom identity broker access to the AWS console

[Enabling custom identity broker access to the AWS console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)

- You can write and run code to create a URL that lets users who sign in to your organization's network securely access the AWS Management Console.
- The URL includes a sign-in token that you get from AWS and that authenticates the user to AWS.

## Logging IAM and AWS STS API calls with AWS CloudTrail

[Logging IAM and AWS STS API calls with AWS CloudTrail](https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)

- IAM and AWS STS are integrated with AWS CloudTrail, a service that provides a record of actions taken by an IAM user or role.
- CloudTrail captures all API calls for IAM and AWS STS as events, including calls from the console and from API calls.
- If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket.
- If you don't configure a trail, you can still view the most recent events in the CloudTrail console in Event history

## How IAM roles differ from resource-based policies

[How IAM roles differ from resource-based policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_compare-resource-policies.html)

- For some AWS services, you can grant cross-account access to your resources.
- To do this, you attach a policy directly to the resource that you want to share, instead of using a role as a proxy.
- The resource that you want to share must support resource-based policies.
- Unlike an identity-based policy, a resource-based policy specifies who (which principal) can access that resource.

### Temporary security credentials in IAM

[Temporary security credentials in IAM](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html)

- You can use the AWS Security Token Service (AWS STS) to create and provide trusted users with temporary security credentials that can control access to your AWS resources.
- Temporary security credentials are short-term, as the name implies. They can be configured to last for anywhere from a few minutes to several hours
- Temporary security credentials are not stored with the user but are generated dynamically and provided to the user when requested.
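For example, an IAM user can request short-lived credentials directly from STS (the duration is illustrative):

```bash
# Returns an AccessKeyId, SecretAccessKey, SessionToken, and Expiration
aws sts get-session-token --duration-seconds 3600
```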
## Actions

[UploadServerCertificate](https://docs.aws.amazon.com/IAM/latest/APIReference/API_UploadServerCertificate.html)

- Uploads a server certificate entity for the AWS account. The server certificate entity includes a public key certificate, a private key, and an optional certificate chain, which should all be PEM-encoded.

## Using an IAM role to grant permissions to applications running on Amazon EC2 instances

[Using an IAM role to grant permissions to applications running on Amazon EC2 instances](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html#roles-usingrole-ec2instance-roles)

- The administrator uses IAM to create the Get-pics role.
- In the role's trust policy, the administrator specifies that only EC2 instances can assume the role
- In the role's permission policy, the administrator specifies read-only permissions for the photos bucket.
- A developer launches an EC2 instance and assigns the Get-pics role to that instance.
- When the application runs, it obtains temporary security credentials from Amazon EC2 instance metadata
- Using the retrieved temporary credentials, the application accesses the photo bucket.
- Because of the policy attached to the Get-pics role, the application has read-only permissions.

## Tutorials

### IAM tutorial: Delegate access across AWS accounts using IAM roles

[IAM tutorial: Delegate access across AWS accounts using IAM roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_cross-account-with-roles.html)

The above tutorial teaches you how to use a role to delegate access to resources in different AWS accounts that you own, called Production and Development

- Step 1: Create a role in the Production Account
  - First, you use the AWS Management Console to establish trust between the Production account (ID number 999999999999) and the Development account (ID number 111111111111).
  - You start by creating an IAM role named UpdateApp.
  - When you create the role, you define the Development account as a trusted entity and specify a permissions policy that allows trusted users to update the productionapp bucket.

### Providing access to an IAM user in another AWS account that you own

[Providing access to an IAM user in another AWS account that you own](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_common-scenarios_aws-accounts.html)

- You can grant your IAM users permission to switch to roles within your AWS account or to roles defined in other AWS accounts that you own.

## Controlling access to AWS resources using tags

[Controlling access to AWS resources using tags](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html)

- You can use tags to control access to your AWS resources that support tagging, including IAM resources.
- Imagine that you have Amazon EC2 instances that are critical to your organization.
- Instead of directly granting your users permission to terminate the instances, you can create a role with those privileges.
- Example scenario using separate development and production accounts (the AssumeRole call is sketched below):
  - In the production account, an administrator uses IAM to create the UpdateApp role in that account.
  - In the role, the administrator defines a trust policy that specifies the development account as a Principal, meaning that authorized users from the development account can use the UpdateApp role.
  - The administrator also defines a permissions policy for the role that specifies the read and write permissions to the Amazon S3 bucket named productionapp.
  - In the development account, an administrator grants members of the Developers group permission to switch to the role.
  - This is done by granting the Developers group permission to call the AWS Security Token Service (AWS STS) AssumeRole API for the UpdateApp role.
  - The user requests to switch to the role
  - AWS STS returns temporary credentials
  - The temporary credentials allow access to the AWS resource
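The switch itself is a single AssumeRole call; a sketch using the account ID and role name from the tutorial scenario above (the session name is illustrative):

```bash
aws sts assume-role \
    --role-arn arn:aws:iam::999999999999:role/UpdateApp \
    --role-session-name update-app-session
```

The response contains temporary credentials (access key, secret key, session token) that the developer's tools then use for subsequent calls.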
## Using an IAM role to grant permissions to applications running on Amazon EC2 instances

### Using instance profiles

[Using instance profiles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html)

- Use an instance profile to pass an IAM role to an EC2 instance.
- you can and should use an IAM role to manage temporary credentials for applications that run on an EC2 instance

## Identity federation in AWS

[Identity federation in AWS](https://aws.amazon.com/identity/federation)

- Identity federation is a system of trust between two parties for the purpose of authenticating users and conveying information needed to authorize their access to resources.
- In this system, an identity provider (IdP) is responsible for user authentication, and a service provider (SP), such as a service or an application, controls access to resources.
- By administrative agreement and configuration, the SP trusts the IdP to authenticate users and relies on the information provided by the IdP about them.
- After authenticating a user, the IdP sends the SP a message, called an assertion, containing the user's sign-in name and other attributes that the SP needs to establish a session with the user and to determine the scope of resource access that the SP should grant.

## Identity providers and federation

- If you already manage user identities outside of AWS, you can use IAM identity providers instead of creating IAM users in your AWS account.
- With an identity provider (IdP), you can manage your user identities outside of AWS and give these external user identities permissions to use AWS resources in your account

## Providing access to externally authenticated users (identity federation)

[Providing access to externally authenticated users (identity federation)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_common-scenarios_federated-users.html)

- Your users might already have identities outside of AWS, such as in your corporate directory.
- If those users need to work with AWS resources (or work with applications that access those resources), then those users also need AWS security credentials.
- You can use an IAM role to specify permissions for users whose identity is federated from your organization or a third-party identity provider (IdP).

### Federating users of a mobile or web-based app with Amazon Cognito

- If you create a mobile or web-based app that accesses AWS resources, the app needs security credentials in order to make programmatic requests to AWS.
- For most mobile application scenarios, we recommend that you use Amazon Cognito.
- For more advanced scenarios, you can work directly with a third-party service like Login with Amazon, Facebook, Google, or any IdP that is compatible with OpenID Connect (OIDC).

### Federating users with SAML 2.0

- If your organization already uses an identity provider software package that supports SAML 2.0 (Security Assertion Markup Language 2.0), you can create trust between your organization as an identity provider (IdP) and AWS as the service provider.
- You can then use SAML to provide your users with federated single sign-on (SSO) to the AWS Management Console or federated access to call AWS API operations.

### Federating users by creating a custom identity broker application

- If your identity store is not compatible with SAML 2.0, then you can build a custom identity broker application to perform a similar function.
- The broker application authenticates users, requests temporary credentials for users from AWS, and then provides them to the user to access AWS resources.

## Premium Support

[What's the difference between an AWS Organizations service control policy and an IAM policy?](https://aws.amazon.com/premiumsupport/knowledge-center/iam-policy-service-control-policy)

## Blog

[How to Establish Federated Access to Your AWS Resources by Using Active Directory User Attributes](https://aws.amazon.com/blogs/security/how-to-establish-federated-access-to-your-aws-resources-by-using-active-directory-user-attributes)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/inspector.md
================================================

# AWS Inspector

### Amazon Inspector rules packages and rules

#### Common vulnerabilities and exposures

[Common vulnerabilities and exposures](https://docs.aws.amazon.com/inspector/latest/userguide/inspector_cves.html)

- The rules in this package help verify whether the EC2 instances in your assessment targets are exposed to common vulnerabilities and exposures (CVEs). Attacks can exploit unpatched vulnerabilities to compromise the confidentiality, integrity, or availability of your service or data. The CVE system provides a reference method for publicly known information security vulnerabilities and exposures.

[Security best practices for Amazon Inspector](https://docs.aws.amazon.com/inspector/latest/userguide/inspector_security-best-practices.html)

- Use Amazon Inspector rules to help determine whether your systems are configured securely.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/kinesis.md
================================================

# Kinesis

[CheatSheet - Amazon Kinesis](https://tutorialsdojo.com/amazon-kinesis)

## Getting started with Amazon Kinesis Data Streams

[Getting started with Amazon Kinesis Data Streams](https://aws.amazon.com/kinesis/data-streams/getting-started)

- Amazon Kinesis Data Streams is a massively scalable, highly durable data ingestion and processing service optimized for streaming data.
- You can configure hundreds of thousands of data producers to continuously put data into a Kinesis data stream.
- Data will be available within milliseconds to your Amazon Kinesis applications, and those applications will receive data records in the order they were generated.

## What Is Amazon Kinesis Data Streams?

[What Is Amazon Kinesis Data Streams?](https://docs.aws.amazon.com/streams/latest/dev/introduction.html)

- You can use Amazon Kinesis Data Streams to collect and process large streams of data records in real time.
- You can create data-processing applications, known as Kinesis Data Streams applications. A typical Kinesis Data Streams application reads data from a data stream as data records.
- You can send the processed records to dashboards, use them to generate alerts, dynamically change pricing and advertising strategies, or send data to a variety of other AWS services.
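On the producer side, writing a record is a single call with a partition key; a minimal sketch (stream name and payload are illustrative; the AWS CLI v2 expects the data blob to be base64-encoded):

```bash
# 'eyJldmVudCI6ICJjbGljayJ9' is base64 for {"event": "click"}
aws kinesis put-record \
    --stream-name my-stream \
    --partition-key user-42 \
    --data 'eyJldmVudCI6ICJjbGljayJ9'
```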
### What Can I Do with Kinesis Data Streams?

[What Can I Do with Kinesis Data Streams?](https://docs.aws.amazon.com/streams/latest/dev/introduction.html)

- Accelerated log and data feed intake and processing
  - You can have producers push data directly into a stream.
  - For example, push system and application logs and they are available for processing in seconds.
  - This prevents the log data from being lost if the front end or application server fails

## Examples

- [Capturing Web Page Scroll Progress with Amazon Kinesis](https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/kinesis-examples-capturing-page-scrolling.html)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/lambda.md
================================================

# Lambda

[Cheat Sheet - AWS Lambda](https://tutorialsdojo.com/aws-lambda)

[What is AWS Lambda?](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html)

- Lambda is a compute service that lets you run code without provisioning or managing servers.
- Lambda runs your code on a high-availability compute infrastructure and performs all of the administration of the compute resources, including server and operating system maintenance, capacity provisioning and automatic scaling, code monitoring and logging.
- With Lambda, you can run code for virtually any type of application or backend service.
- All you need to do is supply your code in one of the languages that Lambda supports.

## Using AWS Lambda with CloudFront Lambda@Edge

[Using AWS Lambda with CloudFront Lambda@Edge](https://docs.aws.amazon.com/lambda/latest/dg/lambda-edge.html)

- Lambda@Edge lets you run Node.js and Python Lambda functions to customize content that CloudFront delivers, executing the functions in AWS locations closer to the viewer.
- The functions run in response to CloudFront events, without provisioning or managing servers. You can use Lambda functions to change CloudFront requests and responses at the following points:
  - After CloudFront receives a request from a viewer (viewer request)
  - Before CloudFront forwards the request to the origin (origin request)
  - After CloudFront receives the response from the origin (origin response)
  - Before CloudFront forwards the response to the viewer (viewer response)

## AWS Lambda Pricing

[AWS Lambda Pricing](https://aws.amazon.com/lambda/pricing)

- Lambda counts a request each time it starts executing in response to an event notification trigger, such as from Amazon Simple Notification Service (SNS) or Amazon EventBridge, or an invoke call, such as from Amazon API Gateway, or via the AWS SDK, including test invokes from the AWS Console.
- Duration is calculated from the time your code begins executing until it returns or otherwise terminates, rounded up to the nearest 1 ms. The price depends on the amount of memory you allocate to your function.

## Lambda function scaling

[Lambda function scaling](https://docs.aws.amazon.com/lambda/latest/dg/invocation-scaling.html#concurrent-execution-safety-limit)

- The first time you invoke your function, AWS Lambda creates an instance of the function and runs its handler method to process the event.
- When the function returns a response, it stays active and waits to process additional events.
- If you invoke the function again while the first event is being processed, Lambda initializes another instance, and the function processes the two events concurrently
- As more events come in, Lambda routes them to available instances and creates new instances as needed.
- When the number of requests decreases, Lambda stops unused instances to free up scaling capacity for other functions.
- The default regional concurrency limit starts at 1,000

## Managing functions

### Managing Lambda reserved concurrency

[Managing Lambda reserved concurrency](https://docs.aws.amazon.com/lambda/latest/dg/configuration-concurrency.html)

- Concurrency is the number of requests that your function is serving at any given time.
- When your function is invoked, Lambda allocates an instance of it to process the event.
- When the function code finishes running, it can handle another request.
- If the function is invoked again while a request is still being processed, another instance is allocated, which increases the function's concurrency.
- The total concurrency for all of the functions in your account is subject to a per-region quota.

There are two types of concurrency controls available:

- Reserved concurrency – Reserved concurrency guarantees the maximum number of concurrent instances for the function. When a function has reserved concurrency, no other function can use that concurrency. There is no charge for this.
- Provisioned concurrency – Provisioned concurrency initializes a requested number of execution environments so that they are prepared to respond immediately to your function's invocations. Note that configuring provisioned concurrency incurs charges to your AWS account.
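Both controls are set from the CLI; a sketch with an illustrative function name and alias:

```bash
# Reserve concurrency for the function (no additional charge)
aws lambda put-function-concurrency \
    --function-name my-function \
    --reserved-concurrent-executions 100

# Provisioned concurrency is configured per version or alias (billed)
aws lambda put-provisioned-concurrency-config \
    --function-name my-function \
    --qualifier prod \
    --provisioned-concurrent-executions 10
```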
## Notes

- AWS Lambda is suited for creating serverless/stateless APIs and costs less than AWS Fargate.

## Blogs

[How do I troubleshoot Lambda function throttling with "Rate exceeded" and 429 "TooManyRequestsException" errors?](https://aws.amazon.com/premiumsupport/knowledge-center/lambda-troubleshoot-throttling)

- Lambda functions are sometimes throttled to protect your resources and downstream applications. Even though Lambda automatically scales to accommodate incoming traffic, your function can still be throttled for various reasons

[Understanding and Managing AWS Lambda Function Concurrency](https://aws.amazon.com/blogs/compute/managing-aws-lambda-function-concurrency)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/lex.md
================================================

# Lex

[Amazon Lex](https://aws.amazon.com/lex)

[Cheat Sheet - Amazon Lex](https://tutorialsdojo.com/amazon-lex)

- Build chatbots with conversational AI

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/macie.md
================================================

> Revision Count: 1

# Macie

[Macie](https://aws.amazon.com/macie/)

[Cheat Sheet - Macie](https://tutorialsdojo.com/amazon-macie)

- Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS.
- As organizations manage growing volumes of data, identifying and protecting their sensitive data at scale can become increasingly complex, expensive, and time-consuming.
- Amazon Macie automates the discovery of sensitive data at scale and lowers the cost of protecting your data.
- Macie automatically provides an inventory of Amazon S3 buckets including a list of unencrypted buckets, publicly accessible buckets, and buckets shared with AWS accounts outside those you have defined in AWS Organizations.
- Then, Macie applies machine learning and pattern matching techniques to the buckets you select to identify and alert you to sensitive data, such as personally identifiable information (PII).

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/mechanicalTurk.md
================================================

# Mechanical Turk

[What is Mechanical Turk](https://docs.aws.amazon.com/AWSMechTurk/latest/AWSMechanicalTurkRequester/WhatIs.html)

[Cheat Sheet - Mechanical Turk](https://tutorialsdojo.com/amazon-mechanical-turk)

- A crowdsourcing marketplace that connects you with an on-demand, scalable, human workforce to complete tasks.
- Using Mechanical Turk, you can programmatically direct tasks to the Mechanical Turk marketplace, where they can be completed by workers around the world.
- Mechanical Turk allows you to access the intelligence, skills, and insights of a global workforce for tasks as varied as data categorization, moderation, data collection and analysis, behavioral studies, and image annotation.
- Mechanical Turk is built around the concept of microtasks, which are small, atomic tasks that workers can complete in their web browser.
- When you submit work to Mechanical Turk, you typically start by breaking it into smaller tasks on which workers can work independently.
- In this way, a project involving categorizing 10,000 images becomes 10,000 individual microtasks that workers can complete.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/migrationHub.md
================================================

# AWS Migration Hub

[AWS Migration Services](https://tutorialsdojo.com/aws-cheat-sheets-migration-services)

[What Is AWS Migration Hub?](https://docs.aws.amazon.com/migrationhub/latest/ug/whatishub.html)

- AWS Migration Hub (Migration Hub) provides a single place to discover your existing servers, plan migrations, and track the status of each application migration.
- Migration Hub provides visibility into your application portfolio and streamlines planning and tracking.
- You can visualize the connections and the status of the servers and databases that make up each of the applications you are migrating, regardless of which migration tool you are using.
- Migration Hub supports migration status updates from the following tools:
  - AWS Application Migration Service (AWS MGN) – the primary migration service recommended for lift-and-shift migrations to AWS
  - AWS Server Migration Service (AWS SMS)
  - AWS Database Migration Service (AWS DMS)
  - The ATADATA ATAmotion partner tool

[Getting started with AWS Migration Hub](https://docs.aws.amazon.com/migrationhub/latest/ug/getting-started.html)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/opswork.md
================================================

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/opsworks.md
================================================

# OpsWorks

[Cheat Sheet - AWS OpsWorks](https://tutorialsdojo.com/aws-opsworks/)

- AWS OpsWorks is a configuration management service that provides managed instances of Chef and Puppet.
- Chef and Puppet are automation platforms that allow you to use code to automate the configurations of your servers.
- OpsWorks lets you use Chef and Puppet to automate how servers are configured, deployed, and managed across your Amazon EC2 instances or on-premises compute environments.

[How to set up AWS OpsWorks Stacks auto healing notifications in Amazon CloudWatch Events](https://aws.amazon.com/blogs/mt/how-to-set-up-aws-opsworks-stacks-auto-healing-notifications-in-amazon-cloudwatch-events/)

- Save the following event pattern as a file named OpsWorksAutoHealingPattern.json

```json
{
  "source": [
    "aws.opsworks"
  ],
  "detail": {
    "initiated_by": [
      "auto-healing"
    ]
  }
}
```

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/organizations.md
================================================

# Organizations

[Cheat Sheet - AWS Organizations](https://tutorialsdojo.com/aws-organizations)

[Cheat Sheet - Multi-Account Multi-Region Data Aggregation On AWS Config](https://tutorialsdojo.com/multi-account-multi-region-data-aggregation-on-aws-config)

[service-control-policies-scp-vs-iam-policies](https://tutorialsdojo.com/service-control-policies-scp-vs-iam-policies/)

[What is AWS Organizations?](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_introduction.html)

- AWS Organizations is an account management service that enables you to consolidate multiple AWS accounts into an organization that you create and centrally manage.
- AWS Organizations includes account management and consolidated billing capabilities that enable you to better meet the budgetary, security, and compliance needs of your business.
- As an administrator of an organization, you can create accounts in your organization and invite existing accounts to join the organization.

## Using AWS Services

### Using AWS Organizations with other AWS services

[Using AWS Organizations with other AWS services](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html)

- You can use trusted access to enable a supported AWS service that you specify, called the trusted service, to perform tasks in your organization and its accounts on your behalf.
- This involves granting permissions to the trusted service but does not otherwise affect the permissions for IAM users or roles.
- When you enable access, the trusted service can create an IAM role called a service-linked role in every account in your organization whenever that role is needed.
- That role has a permissions policy that allows the trusted service to do the tasks that are described in that service's documentation
- The trusted service only creates service-linked roles when it needs to perform management actions on accounts, and not necessarily in all accounts of the organization.

#### AWS Resource Access Manager and AWS Organizations

- AWS Resource Access Manager (AWS RAM) enables you to share specified AWS resources that you own with other AWS accounts.
- It's a centralized service that provides a consistent experience for sharing different types of AWS resources across multiple accounts.

**Service-linked roles created when you enable integration**

- The following service-linked role is automatically created in your organization's management account when you enable trusted access.
- This role allows AWS RAM to perform supported operations within the accounts in your organization.
- You can delete or modify this role only if you disable trusted access between AWS RAM and Organizations, or if you remove the member account from the organization.

```bash
AWSServiceRoleForResourceAccessManager
```

## Managing Policies

### Service control policies

[Service control policies (SCPs)](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scps.html)

- Service control policies (SCPs) are a type of organization policy that you can use to manage permissions in your organization.
- SCPs offer central control over the maximum available permissions for all accounts in your organization.
- SCPs help you to ensure your accounts stay within your organization’s access control guidelines.
- SCPs are available only in an organization that has all features enabled
- An SCP defines a guardrail, or sets limits, on the actions that the account's administrator can delegate to the IAM users and roles in the affected accounts.
- The administrator must still attach identity-based or resource-based policies to IAM users or roles, or to the resources in your accounts to actually grant permissions

> AWS strongly recommends that you don't attach SCPs to the root of your organization without thoroughly testing the impact that the policy has on accounts.

> SCPs do not affect any service-linked role. Service-linked roles enable other AWS services to integrate with AWS Organizations and can't be restricted by SCPs.

## Tutorial: Monitor important changes to your organization with CloudWatch Events

[Tutorial: Monitor important changes to your organization with CloudWatch Events](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tutorials_cwe.html)

- You start by configuring a rule that is triggered when users invoke specific AWS Organizations operations.
- Next, you configure CloudWatch Events to run an AWS Lambda function when the rule is triggered,
- and you configure Amazon SNS to send an email with details about the event.

## Using other AWS Services

### AWS Config and AWS Organizations

[AWS Config and AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/services-that-can-integrate-config.html)

- Multi-account, multi-region data aggregation in AWS Config enables you to aggregate AWS Config data from multiple accounts and AWS Regions into a single account.
- An aggregator is a resource type in AWS Config that collects AWS Config data from multiple source accounts and Regions.
- Create an aggregator in the Region where you want to see the aggregated AWS Config data.
- While creating an aggregator, you can choose to add either individual account IDs or your organization, as sketched below.
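A sketch of creating an organization-wide aggregator (the aggregator name and role ARN are illustrative; the role needs permission to read organization details):

```bash
aws configservice put-configuration-aggregator \
    --configuration-aggregator-name org-aggregator \
    --organization-aggregation-source '{
        "RoleArn": "arn:aws:iam::111111111111:role/ConfigAggregatorRole",
        "AllAwsRegions": true
    }'
```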
## Tutorial: Monitor important changes to your organization with CloudWatch Events

[Tutorial: Monitor important changes to your organization with CloudWatch Events](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tutorials_cwe.html)

- You start by configuring a rule that is triggered when users invoke specific AWS Organizations operations.
- Next, you configure CloudWatch Events to run an AWS Lambda function when the rule is triggered, and you configure Amazon SNS to send an email with details about the event.

## Using other AWS Services

### AWS Config and AWS Organizations

[AWS Config and AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/services-that-can-integrate-config.html)

- Multi-account, multi-region data aggregation in AWS Config enables you to aggregate AWS Config data from multiple accounts and AWS Regions into a single account.
- An aggregator is a resource type in AWS Config that collects AWS Config data from multiple source accounts and Regions.
- Create an aggregator in the Region where you want to see the aggregated AWS Config data.
- While creating an aggregator, you can choose to add either individual account IDs or your organization.

## Managing organizational units (OUs)

[Managing organizational units (OUs)](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_ous.html)

- You can use organizational units (OUs) to group accounts together to administer as a single unit.
- You can attach a policy-based control to an OU, and all accounts within the OU automatically inherit the policy.
- You can create multiple OUs within a single organization, and you can create OUs within other OUs.
- Each OU can contain multiple accounts, and you can move accounts from one OU to another.
- However, OU names must be unique within a parent OU or root.

## Blogs

[What's the difference between an AWS Organizations service control policy and an IAM policy?](https://aws.amazon.com/premiumsupport/knowledge-center/iam-policy-service-control-policy)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/quickSight.md
================================================

# QuickSight

[Quicksight](https://aws.amazon.com/quicksight/)

QuickSight lets you easily create and publish interactive BI dashboards as well as receive answers in seconds through natural language queries. QuickSight dashboards can be accessed from any device, and seamlessly embedded into your applications, portals, and websites.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/rds.md
================================================

# RDS

[Cheat Sheet - RDS](https://tutorialsdojo.com/amazon-relational-database-service-amazon-rds)

[RDS Read Replicas](https://aws.amazon.com/rds/features/read-replicas)

- Provide enhanced performance and durability for RDS database (DB) instances.
- They make it easy to elastically scale out beyond the capacity constraints of a single DB instance for read-heavy database workloads.
- You can create one or more replicas of a given source DB instance and serve high-volume application read traffic from multiple copies of your data.
- Read replicas can also be promoted when needed to become standalone DB instances.
- Read replicas are available in Amazon RDS for MySQL, MariaDB, PostgreSQL, Oracle, and SQL Server as well as Amazon Aurora.
- You can reduce the load on your source DB instance by routing read queries from your applications to the read replica.

[Working with read replicas](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html)

### Multi-AZ deployments for high availability

[Multi-AZ deployments for high availability](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html)

- Multi-AZ deployments can have one standby or two standby DB instances.
- When the deployment has one standby DB instance, it's called a Multi-AZ DB instance deployment.
- A Multi-AZ DB instance deployment has one standby DB instance that provides failover support, but doesn't serve read traffic.
- When the deployment has two standby DB instances, it's called a Multi-AZ DB cluster deployment.
- A Multi-AZ DB cluster deployment has standby DB instances that provide failover support and can also serve read traffic.

## Backing up and restoring an Amazon RDS DB instance

[Backing up and restoring an Amazon RDS DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_CommonTasks.BackupRestore.html)

### Restoring a DB instance to a specified time

- You can restore a DB instance to a specific point in time, creating a new DB instance.
- RDS uploads transaction logs for DB instances to Amazon S3 every 5 minutes.
- To see the latest restorable time for a DB instance, use the AWS CLI describe-db-instances command and look at the value returned in the LatestRestorableTime field for the DB instance (see the sketch after this list).
- To see the latest restorable time for each DB instance in the Amazon RDS console, choose Automated backups.
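A minimal sketch of both steps with the AWS CLI, assuming a source instance named mydbinstance (identifiers are placeholders):

```bash
# Check the most recent restorable time for the instance
aws rds describe-db-instances \
  --db-instance-identifier mydbinstance \
  --query 'DBInstances[0].LatestRestorableTime'

# Restore to a new DB instance; swap --use-latest-restorable-time for
# --restore-time <timestamp> to restore to a specific point in time
aws rds restore-db-instance-to-point-in-time \
  --source-db-instance-identifier mydbinstance \
  --target-db-instance-identifier mydbinstance-restored \
  --use-latest-restorable-time
```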
### Amazon RDS DB instance storage

[Amazon RDS DB instance storage](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html)

- DB instances for Amazon RDS for MySQL, MariaDB, PostgreSQL, Oracle, and Microsoft SQL Server use Amazon Elastic Block Store (Amazon EBS) volumes for database and log storage.
- Amazon RDS provides three storage types: General Purpose SSD (also known as gp2), Provisioned IOPS SSD (also known as io1), and magnetic (also known as standard).
- General Purpose SSD – General Purpose SSD volumes offer cost-effective storage that is ideal for a broad range of workloads.
- Provisioned IOPS – Provisioned IOPS storage is designed to meet the needs of I/O-intensive workloads, particularly database workloads, that require low I/O latency and consistent I/O throughput.
- Magnetic – Amazon RDS also supports magnetic storage for backward compatibility. We recommend that you use General Purpose SSD or Provisioned IOPS for any new storage needs.

### Setting the JVM TTL for DNS name lookups

- The failover mechanism automatically changes the Domain Name System (DNS) record of the DB instance to point to the standby DB instance. As a result, you need to re-establish any existing connections to your DB instance. In a Java virtual machine (JVM) environment, due to how the Java DNS caching mechanism works, you might need to reconfigure JVM settings.
- Because AWS resources use DNS name entries that occasionally change, we recommend that you configure your JVM with a TTL value of no more than 60 seconds.

## NOTES

- Amazon RDS for MySQL does not have a single reader endpoint for read replicas. You must use Amazon Aurora MySQL for this. Creating read replicas is recommended to increase the read performance of an RDS cluster.
- Amazon RDS does not support certain Oracle features such as Multitenant Database, Real Application Clusters (RAC), Unified Auditing, Database Vault, and Recovery Manager (RMAN).

## Blogs

- [Understanding Burst vs. Baseline Performance with Amazon RDS and GP2](https://aws.amazon.com/blogs/database/understanding-burst-vs-baseline-performance-with-amazon-rds-and-gp2/)
- [How to use CloudWatch metrics to decide between General Purpose or Provisioned IOPS for your RDS database](https://aws.amazon.com/blogs/database/how-to-use-cloudwatch-metrics-to-decide-between-general-purpose-or-provisioned-iops-for-your-rds-database)
- [Amazon RDS Multi-AZ Deployments](https://aws.amazon.com/rds/features/multi-az)
- [Amazon RDS – Multi-AZ Deployments For Enhanced Availability & Reliability](https://aws.amazon.com/blogs/aws/amazon-rds-multi-az-deployment)
- [Implementing a disaster recovery strategy with Amazon RDS](https://aws.amazon.com/blogs/database/implementing-a-disaster-recovery-strategy-with-amazon-rds)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/redShift.md
================================================

# Redshift

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/rekognition.md
================================================

# Rekognition

[Cheat Sheet - Amazon Rekognition](https://tutorialsdojo.com/amazon-rekognition)

[What is Amazon Rekognition?](https://docs.aws.amazon.com/rekognition/latest/dg/what-is.html)

## What is Amazon Rekognition?

- Amazon Rekognition makes it easy to add image and video analysis to your applications. You just provide an image or video to the Amazon Rekognition API, and the service can identify objects, people, text, scenes, and activities.
- It can also detect inappropriate content.

## Searching faces in a collection

[Searching faces in a collection](https://docs.aws.amazon.com/rekognition/latest/dg/collections.html)

- Amazon Rekognition can store information about detected faces in server-side containers known as collections.
- You can use the facial information that's stored in a collection to search for known faces in images, stored videos, and streaming videos.

## Text detection

- Extract skewed and distorted text from images and videos of street signs, social media posts, and product packaging.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/resourceAccessManager.md
================================================

# Resource Access Manager

[What is AWS Resource Access Manager?](https://docs.aws.amazon.com/ram/latest/userguide/what-is.html)

- AWS Resource Access Manager (AWS RAM) helps you securely share the AWS resources that you create in one AWS account with other AWS accounts.
- If you have multiple AWS accounts, you can create a resource once and use AWS RAM to make that resource accessible to those other accounts.
- If your account is managed by AWS Organizations, then you can share resources with all of the other accounts in the organization, or only those contained by one or more specified organizational units (OUs).
- You can also share with specific AWS accounts by account ID, regardless of whether the account is part of an organization (see the sketch below).
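A minimal sketch of creating a resource share from the CLI; the share name, subnet ARN, and account ID are placeholders:

```bash
# Share a subnet with a specific account; pass an OU or organization ARN
# to --principals instead to share with OUs or the whole organization
aws ram create-resource-share \
  --name shared-subnets \
  --resource-arns arn:aws:ec2:us-east-1:123456789012:subnet/subnet-0abc1234 \
  --principals 111122223333
```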
## Sharing your AWS resources

[Sharing your AWS resources](https://docs.aws.amazon.com/ram/latest/userguide/getting-started-sharing.html)

- Enable resource sharing within AWS Organizations (optional)

```bash
aws ram enable-sharing-with-aws-organization
```

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/route53.md
================================================

# Route53

[Cheat Sheet - AWS Route53](https://tutorialsdojo.com/amazon-route-53)

## Working with hosted zones

[Working with hosted zones](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/hosted-zones-working-with.html)

- A hosted zone is a container for records, and records contain information about how you want to route traffic for a specific domain, such as example.com, and its subdomains (acme.example.com, zenith.example.com). A hosted zone and the corresponding domain have the same name.
- Public hosted zones contain records that specify how you want to route traffic on the internet.
- Private hosted zones contain records that specify how you want to route traffic in an Amazon VPC.

[How do I associate a Route 53 private hosted zone with a VPC in a different AWS account or Region?](https://aws.amazon.com/premiumsupport/knowledge-center/route53-private-hosted-zone)

### Working with a private hosted zone

#### Associating more VPCs with a private hosted zone

[Associating more VPCs with a private hosted zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/hosted-zone-private-associate-vpcs.html)

- You can use the Amazon Route 53 console to associate more VPCs with a private hosted zone if you created the hosted zone and the VPCs by using the same AWS account.

#### Associating an Amazon VPC and a private hosted zone that you created with different AWS accounts

[Associating an Amazon VPC and a private hosted zone that you created with different AWS accounts](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/hosted-zone-private-associate-vpcs-different-accounts.html)

## Routing internet traffic to your AWS resources

### Routing traffic to an ELB load balancer

- To route domain traffic to an ELB load balancer, use Amazon Route 53 to create an alias record that points to your load balancer.
- An alias record is a Route 53 extension to DNS. It's similar to a CNAME record, but you can create an alias record both for the root domain, such as example.com, and for subdomains, such as www.example.com.

### Routing traffic to a website that is hosted in an Amazon S3 bucket

[Routing traffic to a website that is hosted in an Amazon S3 bucket](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/RoutingToS3Bucket.html)

- To route domain traffic to an S3 bucket, use Amazon Route 53 to create an alias record that points to your bucket.
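A hedged sketch of creating an alias record with the CLI; the hosted zone IDs and load balancer DNS name are placeholders (for an ELB alias, AliasTarget.HostedZoneId must be the load balancer's canonical hosted zone ID, not your own zone's ID):

```bash
# Describe the alias record in a change batch file
cat > alias-record.json <<'EOF'
{
  "Changes": [
    {
      "Action": "UPSERT",
      "ResourceRecordSet": {
        "Name": "example.com",
        "Type": "A",
        "AliasTarget": {
          "HostedZoneId": "Z35SXDOTRQ7X7K",
          "DNSName": "my-load-balancer-1234567890.us-east-1.elb.amazonaws.com",
          "EvaluateTargetHealth": true
        }
      }
    }
  ]
}
EOF

# Apply it to your own hosted zone
aws route53 change-resource-record-sets \
  --hosted-zone-id Z1EXAMPLE \
  --change-batch file://alias-record.json
```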
### Configuring a static website using a custom domain registered with Route 53

[Configuring a static website using a custom domain registered with Route 53](https://docs.aws.amazon.com/AmazonS3/latest/userguide/website-hosting-custom-domain-walkthrough.html)

## Creating Amazon Route 53 health checks and configuring DNS failover

[Creating Amazon Route 53 health checks and configuring DNS failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)

- Amazon Route 53 health checks monitor the health and performance of your web applications, web servers, and other resources. Each health check that you create can monitor one of the following:
  - The health of a specified resource, such as a web server.
  - The status of other health checks.
  - The status of an Amazon CloudWatch alarm.
- With Amazon Route 53 Application Recovery Controller, you can set up routing control health checks with DNS failover records to manage traffic failover for your application.

## Choosing a routing policy

[Choosing a routing policy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html)

- Simple routing policy – Use for a single resource that performs a given function for your domain, for example, a web server that serves content for the example.com website.
- Failover routing policy – Use when you want to configure active-passive failover.
- Geolocation routing policy – Use when you want to route traffic based on the location of your users.
- Geoproximity routing policy – Use when you want to route traffic based on the location of your resources and, optionally, shift traffic from resources in one location to resources in another.
- Latency routing policy – Use when you have resources in multiple AWS Regions and you want to route traffic to the Region that provides the best latency (lowest round-trip time).
- Multivalue answer routing policy – Use when you want Route 53 to respond to DNS queries with up to eight healthy records selected at random.
- Weighted routing policy – Use to route traffic to multiple resources in proportions that you specify.

### Configuring DNSSEC for a domain

[Configuring DNSSEC for a domain](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-configure-dnssec.html)

- Attackers sometimes hijack traffic to internet endpoints such as web servers by intercepting DNS queries and returning their own IP addresses to DNS resolvers in place of the actual IP addresses for those endpoints.
- Users are then routed to the IP addresses provided by the attackers in the spoofed response, for example, to fake websites.
- You can protect your domain from this type of attack, known as DNS spoofing or a man-in-the-middle attack, by configuring Domain Name System Security Extensions (DNSSEC), a protocol for securing DNS traffic.

## Configuring DNS Failover

### Active-active and active-passive failover

[Active-active and active-passive failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-types.html)

- You can use Route 53 health checking to configure active-active and active-passive failover configurations.
- You configure active-active failover using any routing policy (or combination of routing policies) other than failover, and you configure active-passive failover using the failover routing policy (see the sketch below).
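A hedged sketch of an active-passive pair using the failover routing policy; the record values, health check ID, and hosted zone ID are placeholders:

```bash
cat > failover-records.json <<'EOF'
{
  "Changes": [
    {
      "Action": "UPSERT",
      "ResourceRecordSet": {
        "Name": "www.example.com",
        "Type": "A",
        "SetIdentifier": "primary",
        "Failover": "PRIMARY",
        "TTL": 60,
        "HealthCheckId": "11111111-2222-3333-4444-555555555555",
        "ResourceRecords": [{ "Value": "192.0.2.10" }]
      }
    },
    {
      "Action": "UPSERT",
      "ResourceRecordSet": {
        "Name": "www.example.com",
        "Type": "A",
        "SetIdentifier": "secondary",
        "Failover": "SECONDARY",
        "TTL": 60,
        "ResourceRecords": [{ "Value": "192.0.2.20" }]
      }
    }
  ]
}
EOF

aws route53 change-resource-record-sets \
  --hosted-zone-id Z1EXAMPLE \
  --change-batch file://failover-records.json
```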
## Resolving DNS queries between VPCs and your network

[Resolving DNS queries between VPCs and your network](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver.html)

- When you create a VPC using Amazon VPC, Route 53 Resolver automatically uses a Resolver on the VPC to answer DNS queries for local Amazon VPC domain names for EC2 instances (ec2-192-0-2-44.compute-1.amazonaws.com) and records in private hosted zones (acme.example.com).
- For all other domain names, Resolver performs recursive lookups against public name servers.
- You also can integrate DNS resolution between Resolver and DNS resolvers on your network by configuring forwarding rules. Your network can include any network that is reachable from your VPC, such as the following:
  - The VPC itself
  - Another peered VPC
  - An on-premises network that is connected to AWS with AWS Direct Connect, a VPN, or a network address translation (NAT) gateway
- Before you start to forward queries, you create Resolver inbound and/or outbound endpoints in the connected VPC. These endpoints provide a path for inbound or outbound queries:
  - Inbound endpoint: DNS resolvers on your network can forward DNS queries to Route 53 Resolver via this endpoint. This allows your DNS resolvers to easily resolve domain names for AWS resources such as EC2 instances or records in a Route 53 private hosted zone.
  - Outbound endpoint: Resolver conditionally forwards queries to resolvers on your network via this endpoint.
- To forward selected queries, you create Resolver rules that specify the domain names for the DNS queries that you want to forward (such as example.com), and the IP addresses of the DNS resolvers on your network that you want to forward the queries to.

[How do I configure a Route 53 Resolver inbound endpoint to resolve DNS records in my private hosted zone from my remote network?](https://aws.amazon.com/premiumsupport/knowledge-center/route53-resolve-with-inbound-endpoint)

### Values specific for simple alias records

[Values specific for simple alias records](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-values-alias.html)

- When you create alias records, you specify the following values:
  - Routing policy
  - Record name
  - Value/route traffic to
  - Record type
  - Evaluate target health

### Choosing between alias and non-alias records

[Choosing between alias and non-alias records](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-choosing-alias-non-alias.html)

- Amazon Route 53 alias records provide a Route 53–specific extension to DNS functionality.
- Alias records let you route traffic to selected AWS resources, such as CloudFront distributions and Amazon S3 buckets.
- They also let you route traffic from one record in a hosted zone to another record.
- Unlike a CNAME record, you can create an alias record at the top node of a DNS namespace, also known as the zone apex.
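A sketch of a Resolver forwarding rule, assuming an existing outbound endpoint (all IDs, names, domain names, and IPs are placeholders):

```bash
# Forward queries for an on-premises domain to your data-center DNS server
aws route53resolver create-resolver-rule \
  --creator-request-id 2023-03-01-0001 \
  --name forward-corp-example \
  --rule-type FORWARD \
  --domain-name corp.example.com \
  --resolver-endpoint-id rslvr-out-0123456789abcdef0 \
  --target-ips Ip=10.0.0.53,Port=53

# Associate the rule with a VPC so its queries are forwarded
aws route53resolver associate-resolver-rule \
  --resolver-rule-id rslvr-rr-0123456789abcdef0 \
  --vpc-id vpc-0abc1234
```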
## Blogs

[Simplify DNS management in a multi-account environment with Route 53 Resolver](https://aws.amazon.com/blogs/security/simplify-dns-management-in-a-multiaccount-environment-with-route-53-resolver)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/s3.md
================================================

# S3

[Cheat Sheet - AWS S3](https://tutorialsdojo.com/amazon-s3)

[Cheat Sheet - Amazon Glacier](https://tutorialsdojo.com/amazon-glacier)

[Cheat Sheet - s3-pre-signed-urls-vs-cloudfront-signed-urls-vs-origin-access-identity-oai](https://tutorialsdojo.com/s3-pre-signed-urls-vs-cloudfront-signed-urls-vs-origin-access-identity-oai)

[Cheat Sheet - s3-transfer-acceleration-vs-direct-connect-vs-vpn-vs-snowball-vs-snowmobile](https://tutorialsdojo.com/s3-transfer-acceleration-vs-direct-connect-vs-vpn-vs-snowball-vs-snowmobile)

[What is Amazon S3?](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html)

- Amazon Simple Storage Service (Amazon S3) is an object storage service that offers industry-leading scalability, data availability, security, and performance.

[object-lifecycle-mgmt](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)

- Store logs in Amazon S3, and use lifecycle policies to archive them to Amazon Glacier.

## Amazon S3 Event Notifications

[Amazon S3 Event Notifications](https://docs.aws.amazon.com/AmazonS3/latest/userguide/NotificationHowTo.html)

- You can use the Amazon S3 Event Notifications feature to receive notifications when certain events happen in your S3 bucket.
- To enable notifications, you must first add a notification configuration that identifies the events you want Amazon S3 to publish and the destinations where you want Amazon S3 to send the notifications.
- You store this configuration in the notification subresource that is associated with a bucket.

[Tutorial: Using an Amazon S3 trigger to invoke a Lambda function](https://docs.aws.amazon.com/lambda/latest/dg/with-s3-example.html#with-s3-example-configure-event-source)

### Replicating objects

[Replicating objects](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html)

- Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets.
- Buckets that are configured for object replication can be owned by the same AWS account or by different accounts.
- Objects may be replicated to a single destination bucket or multiple destination buckets.
- By default, replication only supports copying new Amazon S3 objects after it is enabled.
- You can use replication to copy existing objects and clone them to a different bucket, but in order to do so, you must contact AWS Support Center.

## Hosting a static website using Amazon S3

[Hosting a static website using Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteHosting.html)

- You can use Amazon S3 to host a static website. On a static website, individual webpages include static content. They might also contain client-side scripts.

## Server Side Encryption

### Protecting data using server-side encryption

[Protecting data using server-side encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/serv-side-encryption.html)

- Server-side encryption is the encryption of data at its destination by the application or service that receives it.
- Amazon S3 encrypts your data at the object level as it writes it to disks in its data centers and decrypts it for you when you access it.
- As long as you authenticate your request and you have access permissions, there is no difference in the way you access encrypted or unencrypted objects. You have three mutually exclusive options, depending on how you choose to manage the encryption keys:
  - Server-Side Encryption with Amazon S3-Managed Keys (SSE-S3)
  - Server-Side Encryption with KMS keys Stored in AWS Key Management Service (SSE-KMS)
  - Server-Side Encryption with Customer-Provided Keys (SSE-C)

### Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)

[Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html)

- Server-side encryption is about protecting data at rest.
- Server-side encryption encrypts only the object data, not object metadata.
- Using server-side encryption with customer-provided encryption keys (SSE-C) allows you to set your own encryption keys.
- The only thing you do is manage the encryption keys you provide.

### Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)

[Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html)

- Server-side encryption protects data at rest.
- Amazon S3 encrypts each object with a unique key.
- As an additional safeguard, it encrypts the key itself with a key that it rotates regularly.

## Working with Buckets

### Using Requester Pays buckets for storage transfers and usage

[Using Requester Pays buckets for storage transfers and usage](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html)

- In general, bucket owners pay for all Amazon S3 storage and data transfer costs that are associated with their bucket.
- However, you can configure a bucket to be a Requester Pays bucket.
- With Requester Pays buckets, the requester instead of the bucket owner pays the cost of the request and the data download from the bucket.

### Using versioning in S3 buckets

[Using versioning in S3 buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Versioning.html)

- Versioning in Amazon S3 is a means of keeping multiple variants of an object in the same bucket.
- You can use the S3 Versioning feature to preserve, retrieve, and restore every version of every object stored in your buckets.

### Configuring fast, secure file transfers using Amazon S3 Transfer Acceleration

[Configuring fast, secure file transfers using Amazon S3 Transfer Acceleration](https://docs.aws.amazon.com/AmazonS3/latest/userguide/transfer-acceleration.html)

- Amazon S3 Transfer Acceleration is a bucket-level feature that enables fast, easy, and secure transfers of files over long distances between your client and an S3 bucket.
- Transfer Acceleration takes advantage of the globally distributed edge locations in Amazon CloudFront.
- As the data arrives at an edge location, the data is routed to Amazon S3 over an optimized network path.

**Why use Transfer Acceleration?**

- Your customers upload to a centralized bucket from all over the world.
- You transfer gigabytes to terabytes of data on a regular basis across continents.
- You can't use all of your available bandwidth over the internet when uploading to Amazon S3.
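A minimal sketch of enabling Transfer Acceleration on a bucket you own (the bucket name is a placeholder):

```bash
# Enable Transfer Acceleration at the bucket level
aws s3api put-bucket-accelerate-configuration \
  --bucket DOC-EXAMPLE-BUCKET \
  --accelerate-configuration Status=Enabled

# Tell the CLI to use the accelerate endpoint, then upload as usual
aws configure set default.s3.use_accelerate_endpoint true
aws s3 cp ./bigfile.bin s3://DOC-EXAMPLE-BUCKET/
```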
### Uploading and copying objects using multipart upload

[Uploading and copying objects using multipart upload](https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html)

[How can I optimize performance when I upload large files to Amazon S3?](https://aws.amazon.com/premiumsupport/knowledge-center/s3-upload-large-files)

- Multipart upload allows you to upload a single object as a set of parts.
- Each part is a contiguous portion of the object's data.
- You can upload these object parts independently and in any order.
- If transmission of any part fails, you can retransmit that part without affecting other parts.
- After all parts of your object are uploaded, Amazon S3 assembles these parts and creates the object.
- In general, when your object size reaches 100 MB, you should consider using multipart uploads instead of uploading the object in a single operation.

## Uploading objects using presigned URLs

[Uploading objects using presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/PresignedUrlUploadObject.html)

- A presigned URL gives you access to the object identified in the URL, provided that the creator of the presigned URL has permissions to access that object.
- All objects and buckets are private by default.
- Presigned URLs are useful if you want your user/customer to be able to upload a specific object to your bucket without requiring them to have AWS security credentials or permissions.
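A quick sketch with the AWS CLI, which generates presigned GET (download) URLs; presigned PUT (upload) URLs are typically generated with an SDK instead. Bucket and key are placeholders:

```bash
# Generate a presigned download URL valid for one hour
aws s3 presign s3://DOC-EXAMPLE-BUCKET/some-object.txt --expires-in 3600
```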
## Managing your storage lifecycle

[Managing your storage lifecycle](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)

## Amazon S3 Glacier storage classes

[Amazon S3 Glacier storage classes](https://aws.amazon.com/s3/storage-classes/glacier)

- The Amazon S3 Glacier storage classes are purpose-built for data archiving, providing you with the highest performance, most retrieval flexibility, and the lowest cost archive storage in the cloud.

## Notes

[I accidentally denied everyone access to my Amazon S3 bucket. How do I regain access?](https://aws.amazon.com/premiumsupport/knowledge-center/s3-accidentally-denied-access/)

- To get access to your bucket again, sign in to the Amazon S3 console as the AWS account root user, and then delete the bucket policy.

#### put-bucket-policy

[put-bucket-policy](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-policy.html)

- This example allows all users to retrieve any object in MyBucket except those in MySecretFolder. It also grants put and delete permissions to the root user of the AWS account 1234-5678-9012.

```bash
aws s3api put-bucket-policy --bucket MyBucket --policy file://policy.json
```

```json
{
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": "*",
      "Action": "s3:GetObject",
      "Resource": "arn:aws:s3:::MyBucket/*"
    },
    {
      "Effect": "Deny",
      "Principal": "*",
      "Action": "s3:GetObject",
      "Resource": "arn:aws:s3:::MyBucket/MySecretFolder/*"
    },
    {
      "Effect": "Allow",
      "Principal": {
        "AWS": "arn:aws:iam::123456789012:root"
      },
      "Action": [
        "s3:DeleteObject",
        "s3:PutObject"
      ],
      "Resource": "arn:aws:s3:::MyBucket/*"
    }
  ]
}
```

### Managing Storage

#### [How can I retrieve an Amazon S3 object that was deleted in a versioning-enabled bucket?](https://aws.amazon.com/premiumsupport/knowledge-center/s3-undelete-configuration/)

- When you delete an object from a versioning-enabled bucket, Amazon S3 creates a delete marker for the object. The delete marker becomes the current version of the object, and the actual object becomes the previous version. With a delete marker, Amazon S3 responds to requests for the object as though the object was deleted. For example, if you send a GET request for the object, Amazon S3 returns an error.

## Blogs

- [Building and Maintaining an Amazon S3 Metadata Index without Servers](https://aws.amazon.com/blogs/big-data/building-and-maintaining-an-amazon-s3-metadata-index-without-servers)
  - Amazon S3 is a simple key-based object store whose scalability and low cost make it ideal for storing large datasets.
  - Its design enables S3 to provide excellent performance for storing and retrieving objects based on a known key.
  - Finding objects based on other attributes, however, requires doing a linear search using the LIST operation.
  - Because each listing can return at most 1000 keys, it may require many requests before finding the object.
  - Because of these additional requests, implementing attribute-based queries in S3 alone can be challenging.
  - A common solution is to build an external index that maps queryable attributes to the S3 object key; the blog describes an approach for building such an index using Amazon DynamoDB and AWS Lambda.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/sageMaker.md
================================================

# SageMaker

[Cheat Sheet - SageMaker](https://tutorialsdojo.com/amazon-sagemaker)

## What Is Amazon SageMaker?

[What Is Amazon SageMaker?](https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html)

- Amazon SageMaker is a fully managed machine learning service.
- With SageMaker, data scientists and developers can quickly and easily build and train machine learning models, and then directly deploy them into a production-ready hosted environment.
- It provides an integrated Jupyter authoring notebook instance for easy access to your data sources for exploration and analysis, so you don't have to manage servers.
- It also provides common machine learning algorithms that are optimized to run efficiently against extremely large data in a distributed environment.

## Blogs

- [Enable self-service, secured data science using Amazon SageMaker notebooks and AWS Service Catalog](https://aws.amazon.com/blogs/mt/enable-self-service-secured-data-science-using-amazon-sagemaker-notebooks-and-aws-service-catalog)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/schemaConversionTool.md
================================================

# Schema Conversion Tool

[What Is the AWS Schema Conversion Tool?](https://docs.aws.amazon.com/SchemaConversionTool/latest/userguide/CHAP_Welcome.html)

- You can use the AWS Schema Conversion Tool (AWS SCT) to convert your existing database schema from one database engine to another.
- You can convert relational OLTP schemas or data warehouse schemas.
- Your converted schema is suitable for an Amazon Relational Database Service (Amazon RDS) MySQL, MariaDB, Oracle, SQL Server, or PostgreSQL DB instance, an Amazon Aurora DB cluster, or an Amazon Redshift cluster.
- The converted schema can also be used with a database on an Amazon EC2 instance or stored as data in an Amazon S3 bucket.

## Migrating data from an on-premises data warehouse to Amazon Redshift

- You can use an AWS SCT agent to extract data from your on-premises data warehouse and migrate it to Amazon Redshift.
- The agent extracts your data and uploads it either to Amazon S3 or, for large-scale migrations, to an AWS Snowball Edge device.
- You can then use AWS SCT to copy the data to Amazon Redshift.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/secretsManager.md
================================================

# Secrets Manager

[Cheat Sheet - Secrets Manager](https://tutorialsdojo.com/aws-secrets-manager/)

- AWS Secrets Manager helps you protect secrets needed to access your applications, services, and IT resources.
- The service enables you to easily rotate, manage, and retrieve database credentials, API keys, and other secrets throughout their lifecycle.
- Users and applications retrieve secrets with a call to Secrets Manager APIs, eliminating the need to hardcode sensitive information in plain text.
- Secrets Manager offers secret rotation with built-in integration for Amazon RDS, Amazon Redshift, and Amazon DocumentDB.
- Also, the service is extensible to other types of secrets, including API keys and OAuth tokens.

## Automate secret creation in AWS CloudFormation

[Automate secret creation in AWS CloudFormation](https://docs.aws.amazon.com/secretsmanager/latest/userguide/integrating_cloudformation.html)

[How to create and retrieve secrets managed in AWS Secrets Manager using AWS CloudFormation templates](https://aws.amazon.com/blogs/security/how-to-create-and-retrieve-secrets-managed-in-aws-secrets-manager-using-aws-cloudformation-template)

- You can use AWS CloudFormation to create and reference secrets from within your AWS CloudFormation stack template.
- You can create a secret and then reference it from another part of the template.
- For example, you can retrieve the user name and password from the new secret and then use them to define the user name and password for a new database.
- You can create and attach resource-based policies to a secret.
- You can also configure rotation by defining a Lambda function in your template and associating the function with your new secret as its rotation Lambda function.
- Secrets Manager provides the following resource types that you can use to create secrets in an AWS CloudFormation template:
  - AWS::SecretsManager::Secret – Creates a secret and stores it in Secrets Manager. You can specify a password, or Secrets Manager can generate one for you.
  - AWS::SecretsManager::ResourcePolicy – Creates a resource-based policy and attaches it to the secret. A resource-based policy controls who can perform actions on the secret.
  - AWS::SecretsManager::RotationSchedule – Configures a secret to perform automatic periodic rotation using the specified Lambda rotation function.
  - AWS::SecretsManager::SecretTargetAttachment – Configures the secret with the details about the service or database that Secrets Manager needs to rotate the secret. For example, for an Amazon RDS DB instance, Secrets Manager adds the connection details and database engine type as entries in the SecureString property of the secret.
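A minimal sketch of creating and retrieving a secret from the CLI (the secret name and value are placeholders):

```bash
# Store a secret as a JSON string
aws secretsmanager create-secret \
  --name prod/app/db-credentials \
  --secret-string '{"username":"admin","password":"REPLACE_ME"}'

# Retrieve just the secret string for use in an application or script
aws secretsmanager get-secret-value \
  --secret-id prod/app/db-credentials \
  --query SecretString \
  --output text
```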
================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/securityTokenService.md
================================================

# Security Token Service

## Actions

### AssumeRole

[AssumeRole](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)

- Returns a set of temporary security credentials that you can use to access AWS resources that you might not normally have access to.
- These temporary credentials consist of an access key ID, a secret access key, and a security token.
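A minimal sketch, assuming a role you're allowed to assume (the role ARN and session name are placeholders):

```bash
# Request temporary credentials for the role
aws sts assume-role \
  --role-arn arn:aws:iam::123456789012:role/cross-account-read \
  --role-session-name demo-session

# The response contains AccessKeyId, SecretAccessKey, and SessionToken;
# export them as AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and
# AWS_SESSION_TOKEN to make calls as the assumed role.
```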
================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/serverMigrationService.md
================================================

# Server Migration Service

[Cheat Sheet - SMS](https://tutorialsdojo.com/aws-server-migration-service-sms)

[What is AWS Server Migration Service?](https://docs.aws.amazon.com/server-migration-service/latest/userguide/server-migration.html)

[AWS Server Migration Service – Server Migration to the Cloud Made Easy](https://aws.amazon.com/blogs/apn/aws-server-migration-service-server-migration-to-the-cloud-made-easy)

- AWS Server Migration Service (AWS SMS) automates the migration of your on-premises VMware vSphere, Microsoft Hyper-V/SCVMM, and Azure virtual machines to the AWS Cloud.
- AWS SMS incrementally replicates your server VMs as cloud-hosted Amazon Machine Images (AMIs) ready for deployment on Amazon EC2. Working with AMIs, you can easily test and update your cloud-based images before deploying them in production.

## Migrate Application

[Migrate applications using AWS SMS](https://docs.aws.amazon.com/server-migration-service/latest/userguide/application-migration.html)

- AWS Server Migration Service supports the automated migration of multi-server application stacks from your on-premises data center to Amazon EC2.
- Where server migration is accomplished by replicating a single server as an Amazon Machine Image (AMI), application migration replicates all of the servers in an application as AMIs and generates an AWS CloudFormation template to launch them in a coordinated fashion.

[Using Amazon CloudWatch Events and AWS Lambda with AWS SMS](https://docs.aws.amazon.com/server-migration-service/latest/userguide/cwe-sms.html)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/serverlessApplicationModel.md
================================================

# Serverless Application Model

[CheatSheet - AWS Serverless Application Model](https://tutorialsdojo.com/aws-serverless-application-model-sam)

## What is the AWS Serverless Application Model (AWS SAM)?

[What is the AWS Serverless Application Model (AWS SAM)?](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/what-is-sam.html)

- The AWS Serverless Application Model (AWS SAM) is an open-source framework that you can use to build serverless applications on AWS.

### Deploying serverless applications gradually

- If you use AWS SAM to create your serverless application, it comes built-in with CodeDeploy to provide gradual Lambda deployments. With just a few lines of configuration, AWS SAM does the following for you:
  - Deploys new versions of your Lambda function, and automatically creates aliases that point to the new version.
  - Gradually shifts customer traffic to the new version until you're satisfied that it's working as expected, or you roll back the update.
  - Defines pre-traffic and post-traffic test functions to verify that the newly deployed code is configured correctly and your application operates as expected.
  - Rolls back the deployment if CloudWatch alarms are triggered.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/serviceCatalog.md
================================================

# AWS Service Catalog

[What Is AWS Service Catalog?](https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html)

[Cheat Sheet - AWS Service Catalog](https://tutorialsdojo.com/aws-service-catalog)

- AWS Service Catalog enables organizations to create and manage catalogs of IT services that are approved for AWS.
- These IT services can include everything from virtual machine images, servers, software, databases, and more to complete multi-tier application architectures.
- AWS Service Catalog allows organizations to centrally manage commonly deployed IT services, and helps organizations achieve consistent governance and meet compliance requirements.
- End users can quickly deploy only the approved IT services they need, following the constraints set by your organization.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/shield.md
================================================

# Shield

[AWS Shield](https://docs.aws.amazon.com/waf/latest/developerguide/shield-chapter.html)

[Cheat Sheet - AWS Shield](https://tutorialsdojo.com/aws-shield)

- AWS Shield is a managed Distributed Denial of Service (DDoS) protection service that safeguards applications running on AWS.
- AWS Shield provides always-on detection and automatic inline mitigations that minimize application downtime and latency.
- There are two tiers of AWS Shield - Standard and Advanced.
- All AWS customers benefit from the automatic protections of AWS Shield Standard, at no additional charge.
- AWS Shield Standard defends against the most common, frequently occurring network and transport layer DDoS attacks that target your website or applications.
- When you use AWS Shield Standard with Amazon CloudFront and Amazon Route 53, you receive comprehensive availability protection against all known infrastructure (Layer 3 and 4) attacks.
- For higher levels of protection against attacks targeting your applications running on Amazon Elastic Compute Cloud (EC2), Elastic Load Balancing (ELB), Amazon CloudFront, AWS Global Accelerator, and Amazon Route 53 resources, you can subscribe to AWS Shield Advanced.

[AWS Shield - Managed DDoS protection](https://aws.amazon.com/shield/?whats-new-cards.sort-by=item.additionalFields.postDateTime&whats-new-cards.sort-order=desc)

- AWS provides AWS Shield Standard and AWS Shield Advanced for protection against DDoS attacks.
- AWS Shield Standard is automatically included at no extra cost beyond what you already pay for AWS WAF and your other AWS services.
- You can add Shield Advanced protection for any of the following resource types:
  - Amazon CloudFront distributions
  - Amazon Route 53 hosted zones
  - AWS Global Accelerator accelerators
  - Application Load Balancers
  - Elastic Load Balancing (ELB) load balancers
  - Amazon Elastic Compute Cloud (Amazon EC2) Elastic IP addresses

> There are two types of AWS Shield: the Standard one, which is free, and the Advanced type, which has an additional cost of around $3,000 per month.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/simpleNotificationService.md
================================================

# Simple Notification Service

[Cheat Sheet - Simple Notification Service](https://tutorialsdojo.com/amazon-sns)

[Mobile push notifications](https://docs.aws.amazon.com/sns/latest/dg/sns-mobile-application-as-subscriber.html)

- With Amazon SNS, you have the ability to send push notification messages directly to apps on mobile devices.
- Push notification messages sent to a mobile endpoint can appear in the mobile app as message alerts, badge updates, or even sound alerts.

## What is Amazon SNS?

[What is Amazon SNS?](https://docs.aws.amazon.com/sns/latest/dg/welcome.html)

- Amazon Simple Notification Service (Amazon SNS) is a managed service that provides message delivery from publishers to subscribers (also known as producers and consumers). Publishers communicate asynchronously with subscribers by sending messages to a topic, which is a logical access point and communication channel. Clients can subscribe to the SNS topic and receive published messages using a supported endpoint type, such as Amazon Kinesis Data Firehose, Amazon SQS, AWS Lambda, HTTP, email, mobile push notifications, and mobile text messages (SMS).

## Message Filtering

[SNS message filtering](https://docs.aws.amazon.com/sns/latest/dg/sns-message-filtering.html)

- By default, an Amazon SNS topic subscriber receives every message published to the topic. To receive a subset of the messages, a subscriber must assign a filter policy to the topic subscription.
- A filter policy is a simple JSON object containing attributes that define which messages the subscriber receives. When you publish a message to a topic, Amazon SNS compares the message attributes to the attributes in the filter policy for each of the topic's subscriptions. If any of the attributes match, Amazon SNS sends the message to the subscriber.

### Subscription Filter Policies

[Amazon SNS subscription filter policies](https://docs.aws.amazon.com/sns/latest/dg/sns-subscription-filter-policies.html)

[Enriching Event-Driven Architectures with AWS Event Fork Pipelines](https://aws.amazon.com/blogs/compute/enriching-event-driven-architectures-with-aws-event-fork-pipelines/)

- A subscription filter policy allows you to specify attribute names and assign a list of values to each attribute name. For more information, see Amazon SNS message filtering.
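A minimal sketch of assigning a filter policy to an existing subscription (the subscription ARN and attribute names/values are placeholders):

```bash
# Only deliver messages whose attributes match the filter policy
aws sns set-subscription-attributes \
  --subscription-arn arn:aws:sns:us-east-1:123456789012:orders:1a2b3c4d-5678-90ab-cdef-EXAMPLE11111 \
  --attribute-name FilterPolicy \
  --attribute-value '{"store": ["example_corp"], "event": ["order_placed"]}'
```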
================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/simpleQueueService.md
================================================

# Simple Queue Service

[Cheat Sheet - Simple Queue Service](https://tutorialsdojo.com/amazon-sqs)

## What is Amazon Simple Queue Service?

[What is Amazon Simple Queue Service?](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/welcome.html)

- Amazon Simple Queue Service (Amazon SQS) offers a secure, durable, and available hosted queue that lets you integrate and decouple distributed software systems and components.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/simpleWorkflowService.md
================================================

# Simple Workflow Service

[Simple Workflow Service](https://aws.amazon.com/swf/)

[Cheat Sheet - SWF](https://tutorialsdojo.com/amazon-simple-workflow-amazon-swf)

- Amazon SWF helps developers build, run, and scale background jobs that have parallel or sequential steps.
- You can think of Amazon SWF as a fully-managed state tracker and task coordinator in the Cloud.

## FAQs

[FAQ - SWF](https://aws.amazon.com/swf/faqs/)

[FAQ - Processing large product catalogs using Amazon Mechanical Turk](https://aws.amazon.com/swf/faqs/#:~:text=Processing%20large%20product%20catalogs%20using%20Amazon%20Mechanical%20Turk)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/singleSignOn.md
================================================

> Revision Count: 1

# Single Sign-On

## What is AWS Single Sign-On?

[What is AWS Single Sign-On?](https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)

- AWS Single Sign-On is a cloud-based single sign-on (SSO) service that makes it easy to centrally manage SSO access to all of your AWS accounts and cloud applications.
- Specifically, it helps you manage SSO access and user permissions across all your AWS accounts in AWS Organizations.
- It works with AWS SSO-integrated applications as well as custom applications that support Security Assertion Markup Language (SAML) 2.0.

## Connect to your Microsoft AD directory

[Connect to your Microsoft AD directory](https://docs.aws.amazon.com/singlesignon/latest/userguide/manage-your-identity-source-ad.html)

- With AWS Single Sign-On, administrators can connect their self-managed Active Directory (AD) or their AWS Managed Microsoft AD directory using AWS Directory Service.
- This Microsoft AD directory defines the pool of identities that administrators can pull from when using the AWS SSO console to assign single sign-on (SSO) access.
- After connecting their corporate directory to AWS SSO, administrators can then grant their AD users or groups access to AWS accounts, cloud applications, or both.

## Notes

- AWS SSO supports single sign-on to business applications through web browsers only.
- AWS SSO supports only SAML 2.0–based applications, so an OpenID Connect-compatible solution will not work here.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/snowball.md
================================================

# Snowball

[Migrate Petabyte-Scale Data to the Cloud](https://aws.amazon.com/getting-started/projects/migrate-petabyte-scale-data/services-costs)

- Snowball is a petabyte-scale data transport solution that uses secure appliances to transfer large amounts of data into and out of the AWS cloud.
[Performance for AWS Snowball](https://docs.aws.amazon.com/snowball/latest/ug/performance.html)

- Speeding Up Data Transfer

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/snowballEdge.md
================================================

# Snowball Edge

[Cheat Sheet - AWS Snowball Edge](https://tutorialsdojo.com/aws-snowball-edge)

## Transferring Files Using the Amazon S3 Interface

### Batching Small Files

[Batching Small Files](https://docs.aws.amazon.com/snowball/latest/developer-guide/batching-small-files.html)

- Each copy operation has some overhead because of encryption.
- To speed up the process of transferring small files to your AWS Snowball Edge device, you can batch them together in a single archive.
- When you batch files together, they can be auto-extracted when they are imported into Amazon S3, if they were batched in one of the supported archive formats.

## Best Practices

[Best Practices](https://docs.aws.amazon.com/snowball/latest/developer-guide/BestPractices.html)

- Speeding Up Data Transfer

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/storageGateway.md
================================================

# Storage Gateway

[Cheat Sheet - AWS Storage Gateway](https://tutorialsdojo.com/aws-storage-gateway)

[How Storage Gateway works (architecture)](https://docs.aws.amazon.com/storagegateway/latest/userguide/StorageGatewayConcepts.html)

- AWS Storage Gateway connects an on-premises software appliance with cloud-based storage to provide seamless integration with data security features between your on-premises IT environment and the AWS storage infrastructure.
- You can use the service to store data in the Amazon Web Services Cloud for scalable and cost-effective storage that helps maintain data security.
- AWS Storage Gateway offers file-based gateways (Amazon S3 File and Amazon FSx File), volume-based gateways (Cached and Stored), and tape-based storage solutions.

[Storage Gateway](https://aws.amazon.com/storagegateway)

- AWS Storage Gateway is a set of hybrid cloud storage services that provide on-premises access to virtually unlimited cloud storage.
- Deliver low-latency data access to on-premises applications while leveraging the agility, economics, and security capabilities of AWS in the cloud.
- Provide on-premises applications access to cloud-backed storage without disruption to your business by maintaining user and application workflows.
- Offer virtually unlimited cloud storage to users and applications without deploying new storage hardware.

### Amazon S3 File Gateway

[Amazon S3 File Gateway](https://docs.aws.amazon.com/storagegateway/latest/userguide/WhatIsStorageGateway.html)

- Amazon S3 File Gateway supports a file interface into Amazon Simple Storage Service (Amazon S3) and combines a service and a virtual software appliance.
- By using this combination, you can store and retrieve objects in Amazon S3 using industry-standard file protocols such as Network File System (NFS) and Server Message Block (SMB).
- The software appliance, or gateway, is deployed into your on-premises environment as a virtual machine (VM) running on a VMware ESXi, Microsoft Hyper-V, or Linux Kernel-based Virtual Machine (KVM) hypervisor.
- The gateway provides access to objects in S3 as files or file share mount points.
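A hedged sketch of mounting an S3 File Gateway NFS share from a Linux client, following the general pattern in the Storage Gateway docs (the gateway IP and share path are placeholders from your own deployment):

```bash
# Mount the gateway's NFS file share; objects in the backing S3 bucket appear as files
sudo mkdir -p /mnt/s3-share
sudo mount -t nfs -o nolock,hard 203.0.113.10:/my-bucket-share /mnt/s3-share
```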
### Amazon FSx File Gateway

[Amazon FSx File Gateway](https://docs.aws.amazon.com/storagegateway/latest/userguide/WhatIsStorageGateway.html)

- Amazon FSx File Gateway (FSx File) is a new file gateway type that provides low-latency, efficient access to in-cloud Amazon FSx for Windows File Server file shares from your on-premises facility.

### File Gateway

[File Gateway](https://aws.amazon.com/storagegateway/file/)

- Nearly all enterprises, regardless of industry, have to store files, whether they are backups, media content, or files generated by specialized industry applications.
- You can choose from two file gateway types for your latency-sensitive applications and workloads that require local caching and file protocol access.
- Amazon S3 File Gateway enables you to store file data as objects in Amazon S3 cloud storage for data lakes, backups, and ML workflows.
- For user or team file shares, and file-based application migrations, Amazon FSx File Gateway provides low-latency, on-premises access to fully managed file shares in Amazon FSx for Windows File Server.

### Tape Gateway

[Tape Gateway](https://docs.aws.amazon.com/storagegateway/latest/userguide/WhatIsStorageGateway.html)

- A tape gateway provides cloud-backed virtual tape storage.
- The tape gateway is deployed into your on-premises environment as a VM running on a VMware ESXi, KVM, or Microsoft Hyper-V hypervisor.

[Tape Gateway](https://aws.amazon.com/storagegateway/vtl/)

- Tape Gateway enables you to replace physical tapes on premises with virtual tapes in AWS without changing existing backup workflows.
- Tape Gateway encrypts data between the gateway and AWS for secure data transfer, and compresses data and transitions virtual tapes between Amazon S3 and Amazon S3 Glacier, or Amazon S3 Glacier Deep Archive, to minimize storage costs.
- You can't directly fetch the media files from your tape gateway in real time, since this data is backed up using Glacier.

> Set up a tape gateway appliance on-premises and connect it to your AWS Storage Gateway.

### Volume Gateway

[Volume Gateway](https://docs.aws.amazon.com/storagegateway/latest/userguide/WhatIsStorageGateway.html)

- A volume gateway provides cloud-backed storage volumes that you can mount as Internet Small Computer System Interface (iSCSI) devices from your on-premises application servers.
- The volume gateway is deployed into your on-premises environment as a VM running on a VMware ESXi, KVM, or Microsoft Hyper-V hypervisor.
- The gateway supports the following volume configurations:
  - Cached volumes – You store your data in Amazon Simple Storage Service (Amazon S3) and retain a copy of frequently accessed data subsets locally.
  - Stored volumes – If you need low-latency access to your entire dataset, first configure your on-premises gateway to store all your data locally. Then asynchronously back up point-in-time snapshots of this data to Amazon S3 (see the sketch below).
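A hedged sketch of taking an on-demand point-in-time snapshot of a gateway volume (the volume ARN is a placeholder from your own gateway):

```bash
# Create an EBS snapshot of the stored volume's point-in-time state
aws storagegateway create-snapshot \
  --volume-arn arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12A3456B/volume/vol-1122AABB \
  --snapshot-description "nightly-backup"
```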
## How Storage Gateway works (architecture)

[How Storage Gateway works (architecture)](https://docs.aws.amazon.com/storagegateway/latest/userguide/StorageGatewayConcepts.html#volume-gateway-concepts)

## AWS Storage Gateway quotas

[AWS Storage Gateway quotas](https://docs.aws.amazon.com/storagegateway/latest/userguide/resource-gateway-limits.html#resource-volume-limits)

## Security

### Configuring CHAP authentication for your volumes

[Configuring CHAP authentication for your volumes](https://docs.aws.amazon.com/storagegateway/latest/userguide/GettingStartedConfigureChap.html)

- In Storage Gateway, your iSCSI (Internet Small Computer System Interface) initiators connect to your volumes as iSCSI targets.
- Storage Gateway uses Challenge-Handshake Authentication Protocol (CHAP) to authenticate iSCSI and initiator connections.
- CHAP provides protection against playback attacks by requiring authentication to access storage volume targets.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/systemManager.md
================================================

> Revision Count: 1

# Systems Manager

[What is AWS Systems Manager?](https://docs.aws.amazon.com/systems-manager/latest/userguide/what-is-systems-manager.html)

[Cheat Sheet - AWS Systems Manager](https://tutorialsdojo.com/aws-systems-manager)

- AWS Systems Manager (formerly known as SSM) is a service you can use to view and control your infrastructure on AWS.
- You can view operational data from multiple AWS services and automate operational tasks across your AWS resources.
- It helps you maintain security and compliance by scanning your managed instances and reporting on (or taking corrective action on) any policy violations it detects.
- Systems Manager comprises individual capabilities, which are grouped into five categories:
  - Operations Management
  - Application Management
  - Change Management
  - Node Management
  - Shared Resources

## Systems Manager Patch Manager

[AWS Systems Manager Patch Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-patch.html)

- Patch Manager, a capability of AWS Systems Manager, automates the process of patching managed instances with both security-related and other types of updates.
- You can use Patch Manager to apply patches for both operating systems and applications. (On Windows Server, application support is limited to updates for applications released by Microsoft.)
- You can use Patch Manager to install Service Packs on Windows instances and perform minor version upgrades on Linux instances.
- You can patch fleets of Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, or your on-premises servers and virtual machines (VMs) by operating system type.
- You can install patches on a regular basis by scheduling patching to run as a Systems Manager maintenance window task.
- Patch Manager integrates with AWS Identity and Access Management (IAM), AWS CloudTrail, and Amazon EventBridge to provide a secure patching experience that includes event notifications and the ability to audit usage.
- Patch Manager uses `patch baselines`, which include rules for auto-approving patches within days of their release, in addition to a list of approved and rejected patches.
- You can also install patches individually or to large groups of managed nodes by using tags. (Tags are keys that help identify and sort your resources within your organization.)
- You can add tags to your patch baselines themselves when you create or update them.

## Monitoring

### Sending SSM Agent logs to CloudWatch Logs

[Sending SSM Agent logs to CloudWatch Logs](https://docs.aws.amazon.com/systems-manager/latest/userguide/monitoring-ssm-agent.html)

- AWS Systems Manager Agent (SSM Agent) is Amazon software that runs on your EC2 instances and your hybrid instances (on-premises instances and virtual machines) that are configured for Systems Manager.
- SSM Agent processes requests from the Systems Manager service in the cloud and configures your machine as specified in the request.

## Application Management

### AWS Systems Manager Parameter Store

[AWS Systems Manager Parameter Store](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html)

- Parameter Store, a capability of AWS Systems Manager, provides secure, hierarchical storage for configuration data management and secrets management.
- You can store data such as passwords, database strings, Amazon Machine Image (AMI) IDs, and license codes as parameter values.
- You can store values as plain text or encrypted data.
- Parameter Store is also integrated with Secrets Manager.

### Setting up AWS Systems Manager for hybrid environments

[Setting up AWS Systems Manager for hybrid environments](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html)

Configuring your hybrid environment for Systems Manager allows you to do the following:

- Create a consistent and secure way to remotely manage your hybrid workloads from one location using the same tools or scripts.
- Centralize access control for actions that can be performed on your servers and VMs by using AWS Identity and Access Management (IAM).
- Centralize auditing and your view into the actions performed on your servers and VMs by recording all actions in AWS CloudTrail.
- Centralize monitoring by configuring Amazon EventBridge and Amazon Simple Notification Service (Amazon SNS) to send notifications about service execution success.

#### Step 7: (Optional) Create Systems Manager service roles

[Step 7: (Optional) Create Systems Manager service roles](https://docs.aws.amazon.com/systems-manager/latest/userguide/setup-service-role.html)

- This topic explains the difference between a service role and a service-linked role for AWS Systems Manager. It also explains when you need to create or use either type of role.
- Service role:
  - A service role is an AWS Identity and Access Management (IAM) role that grants permissions to an AWS service so that the service can access AWS resources.
  - Only a few Systems Manager scenarios require a service role.
  - When you create a service role for Systems Manager, you choose the permissions to grant in order for it to access or interact with other AWS resources.
- Service-linked role:
  - A service-linked role is predefined by Systems Manager and includes all the permissions that the service requires to call other AWS services on your behalf.
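As an illustration of the service-role case, here is a minimal sketch of creating an IAM role that Systems Manager can assume; the role name and policy file name are hypothetical, and the permissions you attach afterwards depend on your scenario.

```bash
# Hypothetical example: create a Systems Manager service role.
# Role name and file path are placeholders, not from this document.
cat > ssm-trust-policy.json <<'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ssm.amazonaws.com" },
      "Action": "sts:AssumeRole"
    }
  ]
}
EOF

aws iam create-role \
  --role-name MySsmServiceRole \
  --assume-role-policy-document file://ssm-trust-policy.json
```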
### About SSM documents for patching managed nodes

[About SSM documents for patching managed nodes](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-ssm-documents.html)

The five recommended SSM documents include:

- AWS-ConfigureWindowsUpdate
- AWS-InstallWindowsUpdates
- AWS-RunPatchBaseline
- AWS-RunPatchBaselineAssociation
- AWS-RunPatchBaselineWithHooks

### About patching schedules using maintenance windows

[About patching schedules using maintenance windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-patch-scheduletasks.html)

- After you configure a patch baseline (and optionally a patch group), you can apply patches to your nodes by using a maintenance window.
- A maintenance window can reduce the impact on server availability by letting you specify a time to perform the patching process that doesn't interrupt business operations.

### AWS Systems Manager Automation

[AWS Systems Manager Automation](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-automation.html)

- Automation, a capability of AWS Systems Manager, simplifies common maintenance and deployment tasks of Amazon Elastic Compute Cloud (Amazon EC2) instances and other AWS resources.
  - Build automations to configure and manage instances and AWS resources.
  - Create custom runbooks or use pre-defined runbooks maintained by AWS.
  - Receive notifications about Automation tasks and runbooks by using Amazon EventBridge.
  - Monitor Automation progress and details by using the Systems Manager console.

### Actions

#### DeleteParameter

- Delete a parameter from the system. After deleting a parameter, wait for at least 30 seconds to create a parameter with the same name.

## Notes

- The AWS Systems Manager Run Command is primarily used to remotely manage the configuration of your managed instances, while AWS Systems Manager State Manager is a configuration management service that automates the process of keeping your Amazon EC2 and hybrid infrastructure in a state that you define.

## Blogs

- [Patching your Windows EC2 instances using AWS Systems Manager Patch Manager](https://aws.amazon.com/blogs/mt/patching-your-windows-ec2-instances-using-aws-systems-manager-patch-manager)
  - For example, you can create patch groups for different environments/tagged instances such as development, test, and production.
  - You can install patches on a regular basis by scheduling patching to run as a maintenance window task.
  - A patch baseline defines which patches should and shouldn't be installed on your instances.
  - You can individually specify approved or rejected patches, or you can use auto-approval rules to specify that certain types of updates (for example, critical updates) should automatically be approved for patching.
  - AWS Systems Manager Maintenance Windows let you define a schedule for when to perform potentially disruptive actions on your instances such as patching an operating system (OS), updating drivers, or installing software.
  - Each maintenance window has a schedule, a duration, a set of registered targets, and a set of registered tasks.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/transcribe.md
================================================

# Transcribe

[Cheat Sheet - Transcribe](https://tutorialsdojo.com/amazon-transcribe)

- Amazon Transcribe is an AWS service that makes it easy for customers to convert speech to text.
## How Amazon Transcribe works

[How Amazon Transcribe works](https://docs.aws.amazon.com/transcribe/latest/dg/how-it-works.html)

- Amazon Transcribe analyzes audio files that contain speech and uses advanced machine learning techniques to transcribe the voice data into text.

## Actions

### StartTranscriptionJob

[StartTranscriptionJob](https://docs.aws.amazon.com/transcribe/latest/dg/API_StartTranscriptionJob.html)

- Starts an asynchronous job to transcribe speech to text.

## FAQs

[FAQs](https://aws.amazon.com/transcribe/faqs)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/vpc.md
================================================

# VPC

[Cheat Sheet - AWS VPC](https://tutorialsdojo.com/amazon-vpc)
[Cheat Sheet - VPC Peering](https://tutorialsdojo.com/vpc-peering)
[Cheat Sheet - AWS Transit Gateway](https://tutorialsdojo.com/aws-transit-gateway)

## Network ACLs

[Network ACLs](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html)

- A network access control list (ACL) is an optional layer of security for your VPC that acts as a firewall for controlling traffic in and out of one or more subnets.
- You might set up network ACLs with rules similar to your security groups in order to add an additional layer of security to your VPC.

## VPC Networking Components

### NAT gateways

[NAT gateways](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html)

- A NAT gateway is a Network Address Translation (NAT) service.
- You can use a NAT gateway so that instances in a private subnet can connect to services outside your VPC, but external services cannot initiate a connection with those instances.
- When you create a NAT gateway, you specify one of the following connectivity types:
  - Public – (Default) Instances in private subnets can connect to the internet through a public NAT gateway, but cannot receive unsolicited inbound connections from the internet.
  - Private – Instances in private subnets can connect to other VPCs or your on-premises network through a private NAT gateway.
    - You can route traffic from the NAT gateway through a transit gateway or a virtual private gateway.
    - You cannot associate an elastic IP address with a private NAT gateway.

### NAT instances

[NAT instances](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html)

- You can create your own AMI that provides network address translation and use your AMI to launch an EC2 instance as a NAT instance.
- You launch a NAT instance in a public subnet to enable instances in the private subnet to initiate outbound IPv4 traffic to the internet or other AWS services, but prevent the instances from receiving inbound traffic initiated on the internet.

### NAT devices for your VPC

[NAT devices for your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat.html)

### Compare NAT gateways and NAT instances

[Compare NAT gateways and NAT instances](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-comparison.html)

## VPC peering

[VPC peering](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-peering.html)

- A VPC peering connection is a networking connection between two VPCs that enables you to route traffic between them privately.
- Instances in either VPC can communicate with each other as if they are within the same network.
- You can create a VPC peering connection between your own VPCs, with a VPC in another AWS account, or with a VPC in a different AWS Region.
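For reference, a minimal CLI sketch of setting up a peering connection between two VPCs; all IDs and the CIDR block are hypothetical placeholders.

```bash
# Hypothetical IDs throughout; requester and accepter may be in
# different accounts or Regions.
aws ec2 create-vpc-peering-connection \
  --vpc-id vpc-1111aaaa \
  --peer-vpc-id vpc-2222bbbb

# The owner of the accepter VPC then accepts the request.
aws ec2 accept-vpc-peering-connection \
  --vpc-peering-connection-id pcx-0123456789abcdef0

# Each VPC still needs a route to the peer CIDR via the peering connection.
aws ec2 create-route \
  --route-table-id rtb-3333cccc \
  --destination-cidr-block 10.1.0.0/16 \
  --vpc-peering-connection-id pcx-0123456789abcdef0
```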
### Configurations with specific routes

[Configurations with specific routes](https://docs.aws.amazon.com/vpc/latest/peering/peering-configurations-partial-access.html)
[One VPC peered with two VPCs using longest prefix match](https://docs.aws.amazon.com/vpc/latest/peering/peering-configurations-partial-access.html#one-to-two-vpcs-lpm)

## Route Tables

### Example routing options

[Example routing options](https://docs.aws.amazon.com/vpc/latest/userguide/route-table-options.html)

## VPC Flow Logs

[VPC Flow Logs](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html)

- VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC.
- Flow log data can be published to Amazon CloudWatch Logs or Amazon S3.
- After you create a flow log, you can retrieve and view its data in the chosen destination.
- Flow logs can help you with a number of tasks, such as:
  - Diagnosing overly restrictive security group rules
  - Monitoring the traffic that is reaching your instance
  - Determining the direction of the traffic to and from the network interfaces

## VPC Endpoints

### Interface VPC endpoints (AWS PrivateLink)

[Interface VPC endpoints (AWS PrivateLink)](https://docs.aws.amazon.com/vpc/latest/privatelink/vpce-interface.html)

- An interface VPC endpoint (interface endpoint) allows you to connect to services powered by AWS PrivateLink.
- These services include some AWS services, services hosted by other AWS customers and Partners in their own VPCs (referred to as endpoint services), and supported AWS Marketplace Partner services.
- The owner of the service is the service provider, and you, as the principal creating the interface endpoint, are the service consumer.

[How do I configure security and network ACLs for my interface-based Amazon VPC endpoint for endpoint services?](https://aws.amazon.com/premiumsupport/knowledge-center/security-network-acl-vpc-endpoint)

> When you create an Amazon VPC endpoint interface with AWS PrivateLink, an Elastic Network Interface is created inside of the subnet that you specify. This interface VPC endpoint (interface endpoint) inherits the network ACL of the associated subnet. You must associate a security group with the interface endpoint to protect incoming and outgoing requests.

## DNS support for your VPC

[DNS attributes in your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#vpc-dns-support)

DNS attributes in your VPC:

- enableDnsHostnames
- enableDnsSupport

> IF both attributes are enabled, an instance launched into the VPC receives a public DNS hostname IF it is assigned a public IPv4 address or an Elastic IP address at creation.

## DHCP options sets for your VPC

[DHCP options sets for your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html)

- The Dynamic Host Configuration Protocol (DHCP) provides a standard for passing configuration information to hosts on a TCP/IP network.
- The options field of a DHCP message contains configuration parameters, including the domain name, domain name server, and the netbios-node-type.
- When you create a VPC, we automatically create a set of DHCP options and associate them with the VPC. You can configure your own DHCP options set for your VPC.
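A minimal sketch of creating and associating a custom DHCP options set with the CLI; the domain name, DNS server IPs, and resource IDs are hypothetical placeholders.

```bash
# Hypothetical values: a custom domain name and on-premises DNS servers.
aws ec2 create-dhcp-options \
  --dhcp-configurations \
    "Key=domain-name,Values=corp.example.com" \
    "Key=domain-name-servers,Values=10.0.0.2,10.0.0.3"

# Associate the returned options set with the VPC.
aws ec2 associate-dhcp-options \
  --dhcp-options-id dopt-0123456789abcdef0 \
  --vpc-id vpc-1111aaaa
```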
### Work with shared VPCs

[Blog - VPC sharing: A new approach to multiple accounts and VPC management](https://aws.amazon.com/blogs/networking-and-content-delivery/vpc-sharing-a-new-approach-to-multiple-accounts-and-vpc-management)
[Work with shared VPCs](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-sharing.html)

- VPC sharing allows multiple AWS accounts to create their application resources, such as Amazon EC2 instances, Amazon Relational Database Service (RDS) databases, Amazon Redshift clusters, and AWS Lambda functions, into shared, centrally managed virtual private clouds (VPCs).
- In this model, the account that owns the VPC (owner) shares one or more subnets with other accounts (participants) that belong to the same organization from AWS Organizations.
- After a subnet is shared, the participants can view, create, modify, and delete their application resources in the subnets shared with them.
- Participants cannot view, modify, or delete resources that belong to other participants or the VPC owner.

### Internetwork traffic privacy in Amazon VPC

[Internetwork traffic privacy in Amazon VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Security.html)

- Security groups: Security groups act as a firewall for associated Amazon EC2 instances, controlling both inbound and outbound traffic at the instance level.
- Network access control lists (ACLs): Network ACLs act as a firewall for associated subnets, controlling both inbound and outbound traffic at the subnet level.
- Flow logs: Flow logs capture information about the IP traffic going to and from network interfaces in your VPC.
- Traffic mirroring: You can copy network traffic from an elastic network interface of an Amazon EC2 instance.

## Blogs

- [How to set up an outbound VPC proxy with domain whitelisting and content filtering](https://aws.amazon.com/blogs/security/how-to-set-up-an-outbound-vpc-proxy-with-domain-whitelisting-and-content-filtering)
  - Controlling outbound communication from your Amazon Virtual Private Cloud (Amazon VPC) to the internet is an important part of your overall preventive security controls.
  - By limiting outbound traffic to certain trusted domains (called "whitelisting") you help prevent instances from downloading malware, communicating with bot networks, or attacking internet hosts.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/waf.md
================================================

# WAF

[AWS WAF](https://aws.amazon.com/waf/)
[Cheat Sheet - AWS WAF](https://tutorialsdojo.com/aws-waf)

- AWS WAF is a web application firewall that helps protect your web applications or APIs against common web exploits and bots that may affect availability, compromise security, or consume excessive resources.
- AWS WAF gives you control over how traffic reaches your applications by enabling you to create security rules that control bot traffic and block common attack patterns, such as SQL injection or cross-site scripting.
- You can also customize rules that filter out specific traffic patterns.

## What are AWS WAF, AWS Shield, and AWS Firewall Manager?

[What are AWS WAF, AWS Shield, and AWS Firewall Manager?](https://docs.aws.amazon.com/waf/latest/developerguide/what-is-aws-waf.html)

- AWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to an Amazon CloudFront distribution, an Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync GraphQL API.
- AWS WAF also lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, Amazon CloudFront, Amazon API Gateway, Application Load Balancer, or AWS AppSync responds to requests either with the requested content or with an HTTP 403 status code (Forbidden).

[White paper - aws-waf-owasp](https://d0.awsstatic.com/whitepapers/Security/aws-waf-owasp.pdf)
[Prepare for the OWASP Top 10 Web Application Vulnerabilities Using AWS WAF](https://aws.amazon.com/blogs/aws/prepare-for-the-owasp-top-10-web-application-vulnerabilities-using-aws-waf-and-our-new-white-paper/)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/webIdentifyFederation.md
================================================

# Web Identity Federation

[Web identity federation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html)

- Imagine that you are creating a mobile app that accesses AWS resources, such as a game that runs on a mobile device and stores player and score information using Amazon S3 and DynamoDB.
- Build your app so that it requests temporary AWS security credentials dynamically when needed using web identity federation. The supplied temporary credentials map to an AWS role that has only the permissions needed to perform the tasks required by the mobile app.
- With web identity federation, you don't need to create custom sign-in code or manage your own user identities. Instead, users of your app can sign in using a well-known external identity provider (IdP), such as Login with Amazon, Facebook, Google, or any other OpenID Connect (OIDC)-compatible IdP. They can receive an authentication token, and then exchange that token for temporary security credentials in AWS that map to an IAM role with permissions to use the resources in your AWS account.

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/wellArchitected.md
================================================

# Well-Architected

## Plan for Disaster Recovery (DR)

[Cheat Sheet - backup-and-restore-vs-pilot-light-vs-warm-standby-vs-multi-site](https://tutorialsdojo.com/backup-and-restore-vs-pilot-light-vs-warm-standby-vs-multi-site)
[Plan for Disaster Recovery (DR)](https://docs.aws.amazon.com/wellarchitected/latest/reliability-pillar/plan-for-disaster-recovery-dr.html)

- Having backups and redundant workload components in place is the start of your DR strategy. RTO and RPO are your objectives for restoration of your workload.
- Recovery Time Objective (RTO) is defined by the organization.
  RTO is the maximum acceptable delay between the interruption of service and restoration of service. This determines what is considered an acceptable time window when service is unavailable.
- Recovery Point Objective (RPO) is defined by the organization.
  RPO is the maximum acceptable amount of time since the last data recovery point. This determines what is considered an acceptable loss of data between the last recovery point and the interruption of service.

When architecting a multi-region disaster recovery strategy for your workload, you should choose one of the following multi-region strategies:

- Backup and restore (RPO in hours, RTO in 24 hours or less)
- Pilot light (RPO in minutes, RTO in hours)
- Warm standby (RPO in seconds, RTO in minutes)
- Multi-region (multi-site) active-active (RPO near zero, RTO potentially zero)

================================================
FILE: home/cloud_certifications/aws/taskset_aws_cloud_certifications/task_001_aws_certified_solutions_architect_professional/whitepapers.md
================================================

# Whitepapers

[Network-to-Amazon VPC connectivity options](https://docs.aws.amazon.com/whitepapers/latest/aws-vpc-connectivity-options/network-to-amazon-vpc-connectivity-options.html)

[AWS Direct Connect + VPN](https://docs.aws.amazon.com/whitepapers/latest/aws-vpc-connectivity-options/aws-direct-connect-vpn.html)

- With AWS Direct Connect + VPN, you can combine AWS Direct Connect dedicated network connections with the Amazon VPC VPN. An AWS Direct Connect **public VIF** establishes a dedicated network connection between your network and public AWS resources, such as an Amazon virtual private gateway IPsec endpoint. The following figure illustrates this option.
- You must use a public virtual interface for your AWS Direct Connect (DX) connection and not a private one.
- [Blue Green Deployments](https://d0.awsstatic.com/whitepapers/AWS_Blue_Green_Deployments.pdf)
  - [Clone a Stack in AWS OpsWorks and Update DNS](https://docs.aws.amazon.com/whitepapers/latest/blue-green-deployments/clone-a-stack-in-aws-opsworks-and-update-dns.html)
    - AWS OpsWorks utilizes the concept of stacks, which are logical groupings of AWS resources (EC2 instances, Amazon RDS, Elastic Load Balancing, and so on) that have a common purpose and should be logically managed together.
- [Building a Scalable and Secure Multi-VPC AWS Network Infrastructure](https://d1.awsstatic.com/whitepapers/building-a-scalable-and-secure-multi-vpc-aws-network-infrastructure.pdf)
  - [Centralized egress to internet](https://docs.aws.amazon.com/whitepapers/latest/building-scalable-secure-multi-vpc-network-infrastructure/centralized-egress-to-internet.html)
    - Deploying a NAT Gateway in every spoke VPC can become expensive because you pay an hourly charge for every NAT Gateway you deploy (see Amazon VPC pricing), so centralizing it could be a viable option.
    - To centralize, we create an egress VPC in the network services account and route all egress traffic from the spoke VPCs via a NAT Gateway sitting in this VPC, leveraging Transit Gateway.
  - [Transit Gateway](https://docs.aws.amazon.com/whitepapers/latest/building-scalable-secure-multi-vpc-network-infrastructure/transit-gateway.html)
    - AWS Transit Gateway provides a hub and spoke design for connecting VPCs and on-premises networks as a fully managed service without requiring you to provision virtual appliances like the Cisco CSRs.
- [AWS Transit Gateway](https://docs.aws.amazon.com/whitepapers/latest/aws-vpc-connectivity-options/aws-transit-gateway.html)
  - AWS Transit Gateway is a highly available and scalable service to consolidate the AWS VPC routing configuration for a region with a hub-and-spoke architecture.
  - Each spoke VPC only needs to connect to the Transit Gateway to gain access to other connected VPCs.
  - Transit Gateways in different regions can peer with each other to enable VPC communication across regions.
  - With a large number of VPCs, Transit Gateway provides simpler VPC-to-VPC communication management than VPC Peering.
- [Security Groups and Network Access Control Lists (Network ACLs) (BP5)](https://docs.aws.amazon.com/whitepapers/latest/aws-best-practices-ddos-resiliency/security-groups-and-network-access-control-lists-nacls-bp5.html)
- [Using AWS for Disaster Recovery](https://aws.amazon.com/blogs/aws/new-whitepaper-use-aws-for-disaster-recovery)
- [Overview of Deployment Options on AWS](https://docs.aws.amazon.com/whitepapers/latest/overview-deployment-options/aws-deployment-services.html)
- [AWS Best Practices for DDoS Resiliency](https://docs.aws.amazon.com/whitepapers/latest/aws-best-practices-ddos-resiliency/welcome.html)

================================================
FILE: home/cloud_providers/aws/ReadMe-static.md
================================================

# AWS Taskset References

- [copy-data-from-an-s3-bucket-in-one-account-and-region-to-another-account-and-region](https://docs.aws.amazon.com/prescriptive-guidance/latest/patterns/copy-data-from-an-s3-bucket-in-one-account-and-region-to-another-account-and-region.html)

================================================
FILE: home/cloud_providers/aws/ReadMe.md
================================================

# taskset_aws_cloud_providers

> [Auto](https://github.com/codeaprendiz/learn_fullstack/blob/main/home/php/intermediate/taskset_intermediate_php/task_004_createGlobalMarkdownTable/generate-readme.php) generated ReadMe.
Number of tasks: 9

| Task | Description |
|----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| task_001 | [task_001_kms](taskset_aws_cloud_providers/task_001_kms) |
| task_002 | [task_002_monitoring_msk](taskset_aws_cloud_providers/task_002_monitoring_msk) |
| task_003 | [task_003_redirection_using_s3_cloudfront](taskset_aws_cloud_providers/task_003_redirection_using_s3_cloudfront) |
| task_004 | [task_004_trigger_codebuild_PR_events__eventbridge__static_branc](taskset_aws_cloud_providers/task_004_trigger_codebuild_PR_events__eventbridge__static_branc) |
| task_005 | [task_005_trigger_codebuild_PR_events__eventbridge__lambda__dynamic_branches](taskset_aws_cloud_providers/task_005_trigger_codebuild_PR_events__eventbridge__lambda__dynamic_branches) |
| task_006 | [task_006_codebuild_codecommit_test_reports_gradle](taskset_aws_cloud_providers/task_006_codebuild_codecommit_test_reports_gradle) |
| task_007 | [task_007_codebuild_codecommit_test_reports_mvn](taskset_aws_cloud_providers/task_007_codebuild_codecommit_test_reports_mvn) |
| task_008 | [task_008_trigger_lambda_from_codecommit_using_event_bridge](taskset_aws_cloud_providers/task_008_trigger_lambda_from_codecommit_using_event_bridge) |
| task_009 | [task_009_pass_vars_dynamically_from_codebuild_to_codepipeline](taskset_aws_cloud_providers/task_009_pass_vars_dynamically_from_codebuild_to_codepipeline) |

================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_001_kms/ReadMe.md
================================================

### Objective

To use AWS KMS to encrypt and decrypt data using the AWS CLI.

Inputs:

1) You have already created a CUSTOMER MANAGED KEY using the AWS console with the alias name `master`
2) You have also created an IAM user with the policy `AWSKeyManagementServicePowerUser`

Docs and links referred:

[aws kms generate-data-key](https://docs.aws.amazon.com/cli/latest/reference/kms/generate-data-key.html)
[youtube](https://www.youtube.com/watch?v=f3APF1dP8w0&t=604s&ab_channel=EnlearAcademy)

#### Step 1

Configure the AWS account using the right region and correct access keys

```bash
$ aws configure
AWS Access Key ID [****************]:
AWS Secret Access Key [****************]:
Default region name [ap-south-1]:
Default output format [None]:
```

#### Step 2

Let's list the aliases first to validate

```bash
$ aws kms list-aliases | grep master | grep -v "arn"
        "AliasName": "alias/master",
```

#### Step 3

Generate a symmetric data key using the `generate-data-key` method. Note that the returned keys are base64 encoded.

```bash
$ aws kms generate-data-key --key-id alias/master --key-spec AES_256 --region ap-south-1
{
    "CiphertextBlob": "your_key_in_cipher_text",
    "Plaintext": "your_key_in_plain_text",
    "KeyId": "your_key_aws_arn"
}
```

#### Step 4

Decode your keys and save them in files

```bash
$ echo "your_key_in_cipher_text" | base64 --decode > ciphertextblob
$ echo "your_key_in_plain_text" | base64 --decode > plaintext
```

#### Step 5

Let's create a sensitive datafile

```bash
$ cat sensitivedatafile.txt
This is very sensitive data please do not copy copying the data is strictly prohibited okay you can copy the data.
```

#### Step 6

Encrypt the sensitive data file

```bash
# -kfile reads the passphrase from the plaintext data-key file
# (a bare -k would treat the literal string "./plaintext" as the passphrase)
$ openssl enc -in ./sensitivedatafile.txt -out ./sensitivedatafile_encrypted.txt -e -aes256 -kfile ./plaintext
```

> NOTE: Delete your plaintext symmetric key now.
We will only store the encrypted version of the plaintext key.

```bash
$ mv plaintext ~/tmp/
$ mv sensitivedatafile.txt /tmp/
```

Okay, I moved them to `tmp` just in case, but we do need to remove them. Very important!

#### Step 7

Now the task is to decrypt `sensitivedatafile_encrypted.txt` using the `ciphertextblob`. Both of them are encrypted files. So first we will get the plaintext version of our data key using the AWS CLI.

```bash
$ aws kms decrypt --ciphertext-blob fileb://ciphertextblob --region ap-south-1
{
    "KeyId": "aws_arn",
    "Plaintext": "your_plaintext_key",
    "EncryptionAlgorithm": "SYMMETRIC_DEFAULT"
}
```

Decode the key

```bash
$ echo "your_plaintext_key" | base64 --decode > plaintext
```

#### Step 8

Finally, decrypt `sensitivedatafile_encrypted.txt` using the `plaintext` key you just obtained. We will use the openssl library for the same.

```bash
# -kfile matches the flag used during encryption
$ openssl enc -in ./sensitivedatafile_encrypted.txt -out ./sensitivedatafile_final.txt -d -aes256 -kfile ./plaintext
$ cat sensitivedatafile_final.txt
This is very sensitive data please do not copy copying the data is strictly prohibited okay you can copy the data.
```

================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_002_monitoring_msk/ReadMe.md
================================================

### Monitoring AWS MSK

You cannot know what you cannot see. You can enable monitoring while setting up MSK. Check out the official documentation, which is more than enough:

[AWS Docs](https://docs.aws.amazon.com/msk/latest/developerguide/monitoring.html)
[Official Docs](https://kafka.apache.org/documentation/#monitoring)
[Confluent Docs](https://docs.confluent.io/platform/current/kafka/monitoring.html)
[kafka-lag-monitoring-and-metrics-at-appsflyer](https://www.confluent.io/blog/kafka-lag-monitoring-and-metrics-at-appsflyer/)

#### What metrics we need to monitor

[metrics-details.html#default-metric](https://docs.aws.amazon.com/msk/latest/developerguide/metrics-details.html#default-metrics)

- Number of active controllers: should always be one
- Number of UnderReplicatedPartitions: should always be zero
- Number of Offline Partitions: should always be zero

#### Why does lag matter?

Why does lag matter and why does it need to be treated differently than other metrics in the system?

Lag is a key performance indicator (KPI) for Kafka. When building an event streaming platform, the consumer group lag is one of the crucial metrics to monitor. As mentioned earlier, when an application consumes messages from Kafka, it commits its offset in order to keep its position in the partition. When a consumer gets stuck for any reason—for example, an error, rebalance, or even a complete stop—it can resume from the last committed offset and continue from the same point in time. Therefore, lag is the delta between the last committed message and the last produced message. In other words, lag indicates how far behind your application is in processing up-to-date information. To make matters worse, remember that Kafka persistence is based on retention, meaning that if your lag persists, you will lose data at some point in time. The goal is to keep lag to a minimum.
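As a quick way to observe consumer lag in practice, here is a sketch using the stock Kafka CLI tool; the bootstrap broker endpoint and consumer group name are hypothetical placeholders for your MSK cluster's values.

```bash
# Describe a consumer group to see per-partition lag
# (LAG = LOG-END-OFFSET minus CURRENT-OFFSET).
# The broker endpoint and group name below are hypothetical.
kafka-consumer-groups.sh \
  --bootstrap-server b-1.mycluster.kafka.ap-south-1.amazonaws.com:9092 \
  --describe \
  --group my-consumer-group
```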
================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_003_redirection_using_s3_cloudfront/ReadMe.md
================================================

### Redirect Apex domain to another domain's subdomain with browser change in URL

Requirements

- firstdomain.com should be redirected to https://sub.seconddomain.com
- http://firstdomain.com should be redirected to https://sub.seconddomain.com
- https://firstdomain.com should be redirected to https://sub.seconddomain.com

Documents referred

[https://aws.amazon.com/premiumsupport/knowledge-center/route-53-redirect-to-another-domain](https://aws.amazon.com/premiumsupport/knowledge-center/route-53-redirect-to-another-domain)
[stackoverflow](https://stackoverflow.com/questions/10115799/set-up-dns-based-url-forwarding-in-amazon-route53/14289082#14289082)

### S3 configuration

- Name of the bucket: firstdomain.com
- Enable static website hosting with "Redirect requests for an object", set the `Hostname` to `sub.seconddomain.com`, and set the protocol to `https`
- Keep a copy of the website endpoint `http://firstdomain.com.s3-website.ap-south-1.amazonaws.com`

### CloudFront configuration

- Create a CloudFront distribution with the alternate domain name `firstdomain.com`
- You will need to create an ACM certificate in `us-east-1` for CloudFront; ensure that the ACM certificate covers the domains `*.firstdomain.com`, `firstdomain.com`, `www.firstdomain.com`, `*.seconddomain.com`, `seconddomain.com`, `www.seconddomain.com`
- Keep the origin domain as `firstdomain.com.s3-website.ap-south-1.amazonaws.com`, the endpoint noted in the previous step without the `http://`
- The origin protocol should be `http`, as the S3 website configuration only supports HTTP requests, so the HTTP port will also be 80
- Set the viewer protocol policy to `Redirect HTTP to HTTPS`
- You can keep all the HTTP methods as allowed
- Make a note of the distribution domain name `https://something.cloudfront.net`

### Route53 configuration

- Go to the hosted zone `firstdomain.com`
- Create an `Alias A IPv4` record for `firstdomain.com` pointing to `something.cloudfront.net`

### Validation

- When there is a `cache miss` from CloudFront

```bash
$ curl -I http://something.cloudfront.net -L
HTTP/1.1 301 Moved Permanently
Server: CloudFront
Date: Thu, 12 Aug 2021 12:12:04 GMT
Content-Type: text/html
Content-Length: 183
Connection: keep-alive
Location: https://something.cloudfront.net/
X-Cache: Redirect from cloudfront
Via: 1.1 5dd0dcc9e0464f63fa9f8c3a40.cloudfront.net (CloudFront)
X-Amz-Cf-Pop: DEL54-C4
X-Amz-Cf-Id: 5kX-_t55pHGTMaZt046sbSyS9geMsw8RagPXNGdiqthnV9HEJc18Rw==

HTTP/2 301
content-length: 0
location: https://sub.seconddomain.com/
date: Thu, 12 Aug 2021 12:12:05 GMT
server: AmazonS3
x-cache: Miss from cloudfront
via: 1.1 5ef0432e6c0ac31f0b8bdb72d3755f66.cloudfront.net (CloudFront)
x-amz-cf-pop: DEL54-C4
x-amz-cf-id: nZGDaK7tSmo4hwC6jlT9fLV5rjNglbNajvLtj0y54vROJg18Qislrg==

HTTP/1.1 404 Not Found
Content-Length: 19
Content-Type: text/plain; charset=utf-8
Date: Thu, 12 Aug 2021 12:12:04 GMT
X-Content-Type-Options: nosniff
Connection: keep-alive
```

- When there is a `hit` from CloudFront

```bash
$ curl -I http://something.cloudfront.net -L
HTTP/1.1 301 Moved Permanently
Server: CloudFront
Date: Fri, 13 Aug 2021 11:17:07 GMT
Content-Type: text/html
Content-Length: 183
Connection: keep-alive
Location: https://something.cloudfront.net/
X-Cache: Redirect from cloudfront
Via: 1.1 637fcf134a6acd248c904995685d8a65.cloudfront.net (CloudFront)
X-Amz-Cf-Pop: DEL54-C4
X-Amz-Cf-Id: MZa1056r6UIWlshM0FzGsVoAMtdVtkW8-5JMSb2JxngFIkC2kdNT4g==

HTTP/2 301
content-length: 0
location: https://sub.seconddomain.com/
date: Thu, 12 Aug 2021 12:12:05 GMT
server: AmazonS3
x-cache: Hit from cloudfront
via: 1.1 d074672a93d4cecfc24649b988ca81dc.cloudfront.net (CloudFront)
x-amz-cf-pop: DEL54-C4
x-amz-cf-id: lQyKipnkYjneJ27p1ox3-bLEbnrrV49dOIMq8iXyZtP1Q402rPBKEw==
age: 83103

HTTP/1.1 404 Not Found
Content-Length: 19
Content-Type: text/plain; charset=utf-8
Date: Fri, 13 Aug 2021 11:17:07 GMT
X-Content-Type-Options: nosniff
Connection: keep-alive
```

### Issues you might face

- Ensure that the CNAME is added to the CDN and is covered by the ACM certificate
- Sometimes it's just CloudFront, because it takes some time to reflect changes. You can invalidate the CloudFront cache by creating a cache invalidation for `/*`

================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_004_trigger_codebuild_PR_events__eventbridge__static_branc/ReadMe.md
================================================

# Trigger Codebuild on PR events using EventBridge (the hardcoded branch gets triggered)

- [Trigger Codebuild on PR events using EventBridge (the hardcoded branch gets triggered)](#trigger-codebuild-on-pr-events-using-eventbridge-the-hardcoded-branch-gets-triggered)
  - [Docs Referred](#docs-referred)
  - [Existing Issue](#existing-issue)
  - [Create a repository in AWS CodeCommit](#create-a-repository-in-aws-codecommit)
  - [Create a CodeBuild project - codebuild-app](#create-a-codebuild-project---codebuild-app)
  - [Create Pipeline in CodePipeline](#create-pipeline-in-codepipeline)
  - [Commit to the repository and check if build gets triggered](#commit-to-the-repository-and-check-if-build-gets-triggered)
  - [Create EventBridge Rule](#create-eventbridge-rule)
  - [Create a PR and check if build gets triggered](#create-a-pr-and-check-if-build-gets-triggered)

## Docs Referred

[Automated Code Review on Pull Requests using AWS CodeCommit and AWS CodeBuild](https://aws.amazon.com/blogs/devops/automated-code-review-on-pull-requests-using-aws-codecommit-and-aws-codebuild/)

## Existing Issue

CodeBuild always runs against the same branch, which is hardcoded in the CodeBuild project. We want to trigger CodeBuild on PR events with the source branch as the PR branch.
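For reference, the EventBridge rule created later in this task could be expressed with the CLI roughly as below; the repository ARN and account ID are hypothetical placeholders, and the rule name matches the note further down.

```bash
# Hypothetical sketch of the rule this task sets up in the console:
# match CodeCommit pull request state changes for one repository.
aws events put-rule \
  --name pr_event_rule \
  --event-pattern '{
    "source": ["aws.codecommit"],
    "detail-type": ["CodeCommit Pull Request State Change"],
    "resources": ["arn:aws:codecommit:ap-south-1:111122223333:my-repo"]
  }'
```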
## Create a repository in AWS CodeCommit

```bash
mkdir tmp  # tmp is added to .gitignore
cd tmp
```

```bash
# Make sure you upload your public key to the AWS IAM user security credentials section
cat ~/.ssh/config
Host git-codecommit.*.amazonaws.com
  User
  IdentityFile ~/.ssh/id_rsa
```

## Create a CodeBuild project - codebuild-app

Role created automatically - `codebuild-app-codebuild-service-Role`

## Create Pipeline in CodePipeline

New role created `AWSCodePipelineServiceRole-xx-region-y-app-pipeline`

## Commit to the repository and check if build gets triggered

## Create EventBridge Rule

Role created `Amazon_EventBridge_Invoke_CodeBuild_5492177`

Create a rule in the default event bus

## Create a PR and check if build gets triggered

```bash
gco -b "feat_pr_3"  # gco is a git checkout alias
echo "test" >> ReadMe.md
```

> Note: The rule submitted is rule/pr_event_rule, but the branch built is `refs/heads/master` (the hardcoded branch), not the PR branch.

================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_005_trigger_codebuild_PR_events__eventbridge__lambda__dynamic_branches/ReadMe.md
================================================

# Trigger codebuild PR events using EventBridge (Lambda, Dynamic Branches)

- [Trigger codebuild PR events using EventBridge (Lambda, Dynamic Branches)](#trigger-codebuild-pr-events-using-eventbridge-lambda-dynamic-branches)
  - [Objective](#objective)
  - [Research links and docs with relevant information](#research-links-and-docs-with-relevant-information)
  - [Create repo-a in Codecommit](#create-repo-a-in-codecommit)
  - [Create repo-b in Codecommit](#create-repo-b-in-codecommit)
  - [Create a codebuild project for repo-a - codebuild-repo-a](#create-a-codebuild-project-for-repo-a---codebuild-repo-a)
  - [Create a codebuild project for repo-b - codebuild-repo-b](#create-a-codebuild-project-for-repo-b---codebuild-repo-b)
  - [Create codepipeline for repo-a - codepipeline-repo-a](#create-codepipeline-for-repo-a---codepipeline-repo-a)
  - [Create codepipeline for repo-b - codepipeline-repo-b](#create-codepipeline-for-repo-b---codepipeline-repo-b)
  - [Create a lambda function to trigger respective codebuilds](#create-a-lambda-function-to-trigger-respective-codebuilds)
  - [Create a rule for repo-a in EventBridge to trigger the lambda function](#create-a-rule-for-repo-a-in-eventbridge-to-trigger-the-lambda-function)
  - [Create PR for repo-a](#create-pr-for-repo-a)
    - [Check logs for lambda in cloudwatch when PR is created](#check-logs-for-lambda-in-cloudwatch-when-pr-is-created)
    - [Logs in codebuild-repo-a](#logs-in-codebuild-repo-a)
  - [Create a rule for repo-b in EventBridge to trigger the lambda function](#create-a-rule-for-repo-b-in-eventbridge-to-trigger-the-lambda-function)
  - [Create PR for repo-b](#create-pr-for-repo-b)

## Objective

- A PR event always triggers CodeBuild with the branch hardcoded in the build project.
- We want to trigger CodeBuild with the PR branch itself, which requires a Lambda function in between.

## Research links and docs with relevant information

[aws . codebuild . start-build](https://docs.aws.amazon.com/cli/latest/reference/codebuild/start-build.html)
You can start a CodeBuild project dynamically from the command line. You can use the same command in a Lambda function to trigger the CodeBuild project.

[docs.aws.amazon.com » Monitoring CodeCommit events in Amazon EventBridge and Amazon CloudWatch Events](https://docs.aws.amazon.com/codecommit/latest/userguide/monitoring-events.html#pullRequestStatusChanged)
You can get the payload from the event and use it in the lambda function for testing.
[stackoverflow.com » Creating Lambda Function to Trigger Codebuild Project using Nodejs](https://stackoverflow.com/questions/56568921/creating-lambda-function-to-trigger-codebuild-project-using-nodejs)
Trigger CodeBuild from Lambda using Node.js.

[docs.aws.amazon.com » Environment variables in build environments](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-env-vars.html)
You can use environment variables in the buildspec.yaml file, for example to get the source branch.

## Create repo-a in Codecommit

## Create repo-b in Codecommit

## Create a codebuild project for repo-a - codebuild-repo-a

## Create a codebuild project for repo-b - codebuild-repo-b

## Create codepipeline for repo-a - codepipeline-repo-a

## Create codepipeline for repo-b - codepipeline-repo-b

## Create a lambda function to trigger respective codebuilds

Copy the code from `lambda.py` to the Lambda function.
Click on Deploy to deploy the Lambda function (your code gets updated in the function).
Click on Test; it should ask you to create a sample event with which we can test the Lambda function.
Get the event from [docs.aws.amazon.com » Monitoring CodeCommit events in Amazon EventBridge and Amazon CloudWatch Events](https://docs.aws.amazon.com/codecommit/latest/userguide/monitoring-events.html#pullRequestStatusChanged) and paste it in the event.
Click on Test.
You will get an error.
Add the following permissions to the Lambda service role (`codebuild-dynamic-communicator-role-py375219`) so it can trigger the CodeBuild project:

```json
{
  "Effect": "Allow",
  "Action": "codebuild:StartBuild",
  "Resource": "arn:aws:codebuild:xx-region-y:xxxxxxxxxxxxxx:project/*"
}
```

## Create a rule for repo-a in EventBridge to trigger the lambda function

Select the default event bus and create a rule.
Give the ARN of CodeCommit repo-a as the source, with the event type PR_STATUS_CHANGE.
Select the Lambda function as the target.
Create the rule.

## Create PR for repo-a

Add a `buildspec.yaml` file in repo-a (so the build shows the latest commit's files) and update codebuild-repo-a.

```bash
$ gst  # gst is a git status alias
On branch feat_pr_from_repo_a
Changes not staged for commit:
    deleted: repo_a__commit_4
Untracked files:
    repo_a__commit_5
```

Create the pull request.

### Check logs for lambda in cloudwatch when PR is created

### Logs in codebuild-repo-a

Add commits to the PR

```bash
$ gst
On branch feat_pr_from_repo_a
Changes not staged for commit:
    deleted: repo_a__commit_5
Untracked files:
    repo_a__commit_6
# Push the changes as a new commit
```

See that the build is triggered against the PR branch. The latest commit information is available in the build logs.

```bash
[Container] 2024/04/27 04:19:26.605628 Running command ls -ltrh
total 4.0K
-rw-r--r-- 1 root root   0 Apr 27 04:19 repo_a__commit_6
-rw-r--r-- 1 root root 273 Apr 27 04:19 buildspec.yaml
```

> Note: The branch name is refs/heads/feat_pr_from_repo_a

## Create a rule for repo-b in EventBridge to trigger the lambda function

## Create PR for repo-b

```bash
$ git branch --show-current
feat_pr_from_repo_b_branch1
$ gst
On branch feat_pr_from_repo_b_branch1
Changes not staged for commit:
    deleted: repo_a__test5
Untracked files:
    repo_a__test6
# commit the changes
```

Logs of codebuild-repo-b

```bash
[Container] 2024/04/27 04:38:28.453263 Running command ls -ltrh
total 4.0K
-rw-r--r-- 1 root root   0 Apr 27 04:38 repo_a__test6
-rw-r--r-- 1 root root 273 Apr 27 04:38 buildspec.yaml
```

> Note: The branch name is refs/heads/feat_pr_from_repo_b_branch1 and has the latest commit
================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_005_trigger_codebuild_PR_events__eventbridge__lambda__dynamic_branches/buildspec.yaml
================================================

version: 0.2

phases:
  install:
    commands:
      - echo "Installing dependencies"
  pre_build:
    commands:
      - echo "Pre-build step"
  build:
    commands:
      - echo "Build phase"
      - ls -ltrh
  post_build:
    commands:
      - echo "Post-build step"

================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_005_trigger_codebuild_PR_events__eventbridge__lambda__dynamic_branches/lambda.py
================================================

import json

import boto3


def lambda_handler(event, context):
    # Initialize the CodeBuild client
    codebuild = boto3.client('codebuild')

    # Print the raw event data
    print("Raw Event Data: for repob : 1")
    print(json.dumps(event))

    detail = event.get("detail", {})
    repository_names = detail.get("repositoryNames", [])
    source_version = detail.get("sourceReference", "")

    if source_version and repository_names:
        try:
            # If the CodeCommit repo name is repo-a then the CodeBuild project name will be codebuild-repo-a
            project_name = "codebuild-" + repository_names[0]
            print(f"Project Name: {project_name}")

            # Trigger the build against the PR source branch
            response = codebuild.start_build(
                projectName=project_name,
                sourceVersion=source_version
            )
            print("CodeBuild Triggered Successfully")
            print(response)
        except Exception as e:
            print("Failed to trigger CodeBuild")
            print(str(e))
    else:
        print("No source version or project name provided, cannot trigger CodeBuild.")

    # Return a successful response
    return {
        "statusCode": 200,
        "body": json.dumps("Event processed successfully")
    }

================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_006_codebuild_codecommit_test_reports_gradle/ReadMe-static.md
================================================

# Building an Application with Spring Boot with Gradle Groovy, Unit Tests, and Reports

- [Building an Application with Spring Boot with Gradle Groovy, Unit Tests, and Reports](#building-an-application-with-spring-boot-with-gradle-groovy-unit-tests-and-reports)
  - [Run the application](#run-the-application)
  - [Run Unit Tests](#run-unit-tests)
  - [Show reports directory](#show-reports-directory)

For more info about the project, check the folder `learn_java/task_006` in the repo `learn_fullstack`.

> If you are getting import package errors then you can try opening only the specific task in VS Code by running `code .` in the terminal. Every task is a separate project and has its own dependencies.

```bash
$ java --version
openjdk 21.0.2 2024-01-16
OpenJDK Runtime Environment Homebrew (build 21.0.2)
OpenJDK 64-Bit Server VM Homebrew (build 21.0.2, mixed mode, sharing)
```

## Run the application

```bash
# Install the dependencies using gradle
./gradlew build

# Run the application
./gradlew bootRun
```

Output

```bash
...
welcomePageNotAcceptableHandlerMapping
<==========---> 80% EXECUTING [6m 2s]
> :bootRun
```

Validate the application is running

```bash
curl http://localhost:8080
```

Output

```bash
Greetings from Spring Boot!
```

## Run Unit Tests

Run the tests

```bash
# Run the tests
./gradlew test

# In a typical development workflow with Gradle, if the source code or tests have not been
# modified since the last build, Gradle will consider the tasks up-to-date and will not rerun them.
# To force Gradle to rerun the tests, you can use the --rerun-tasks option
./gradlew test --rerun-tasks
```

Output

```bash
BUILD SUCCESSFUL in 567ms
4 actionable tasks: 4 up-to-date
```

## Show reports directory

```bash
./gradlew showDirs
```

```bash
$ ./gradlew showDirs

> Task :showDirs
Reports directory: .../learn_java/taskset/task_006_building_an_application_with_spring_boot__gradle_groovy__unit_tests__reports/build/reports
Test results directory: .../learn_java/taskset/task_006_building_an_application_with_spring_boot__gradle_groovy__unit_tests__reports/build/test-results

BUILD SUCCESSFUL in 522ms
1 actionable task: 1 executed
```

================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_006_codebuild_codecommit_test_reports_gradle/ReadMe.md
================================================

# Codebuild Codecommit Gradle Project Unit Tests Reports

- [Codebuild Codecommit Gradle Project Unit Tests Reports](#codebuild-codecommit-gradle-project-unit-tests-reports)
  - [Create a repository in CodeCommit : repo-d](#create-a-repository-in-codecommit--repo-d)
  - [Create a codebuild project : repo-d](#create-a-codebuild-project--repo-d)
  - [Go to the Report Groups section](#go-to-the-report-groups-section)

[docs.aws.amazon.com » Create a test report](https://docs.aws.amazon.com/codebuild/latest/userguide/report-create.html)
[Test Reports with AWS CodeBuild](https://aws.amazon.com/blogs/devops/test-reports-with-aws-codebuild)
[docs.aws.amazon.com » Working with reports](https://docs.aws.amazon.com/codebuild/latest/userguide/test-report.html)
[docs.aws.amazon.com » View test reports](https://docs.aws.amazon.com/codebuild/latest/userguide/test-view-reports.html)
[docs.gradle.org » Test reporting](https://docs.gradle.org/current/userguide/java_testing.html#test_reporting)

## Create a repository in CodeCommit : repo-d

## Create a codebuild project : repo-d

Use the `buildspec.yaml` file in the root of the project. Make sure you give the right path to the test reports in the `buildspec.yaml` file. The files get generated in the project directory `build/test-results`. Trigger the build manually using the `Start build` button.

```yaml
reports:
  GradleReports:
    files:
      - '**/*.xml'
    base-directory: 'build/test-results'
```

## Go to the Report Groups section

You should see the `repo-d-GradleReports` group.

[aws cli » list-report-groups](https://docs.aws.amazon.com/cli/latest/reference/codebuild/list-report-groups.html)
[stackoverflow » turn off pager](https://stackoverflow.com/questions/60122188/how-to-turn-off-the-pager-for-aws-cli-return-value)

```bash
AWS_PAGER="" aws codebuild list-report-groups
```

Get the reports

```bash
AWS_PAGER="" aws codebuild list-reports-for-report-group --report-group-arn <>
```

================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_006_codebuild_codecommit_test_reports_gradle/buildspec.yaml
================================================

version: 0.2

phases:
  install:
    runtime-versions:
      java: corretto21 # Assuming corretto21 is available, replace with the specific version available in AWS CodeBuild
    commands:
      - echo "Checking Java version..."
      - java -version
      - echo "Checking Gradle version..."
      - gradle -v
  pre_build:
    commands:
      - echo "Preparing build..."
      - echo "Setting up JAVA_HOME..."
      - export JAVA_HOME=$(dirname $(dirname $(readlink -f $(which javac))))
      - echo $JAVA_HOME
  build:
    commands:
      - echo "Build started on `date`"
      - echo "Running Gradle tasks..."
      - gradle clean build
      - gradle test
  post_build:
    commands:
      - echo "Build and test steps completed"

reports:
  GradleReports:
    files:
      - '**/*.xml'
    base-directory: 'build/test-results'

================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_007_codebuild_codecommit_test_reports_mvn/ReadMe.md
================================================

# AWS Codebuild Test Reports

- [AWS Codebuild Test Reports](#aws-codebuild-test-reports)
  - [Docs](#docs)
  - [Create a repository in CodeCommit : repo-d](#create-a-repository-in-codecommit--repo-d)
  - [Create a codebuild project : repo-d](#create-a-codebuild-project--repo-d)
  - [Go to the Report Groups section](#go-to-the-report-groups-section)

## Docs

[stackoverflow.com » Viewing Unit Test and Coverage Reports Generated in AWS CodeBuild](https://stackoverflow.com/questions/49664524/viewing-unit-test-and-coverage-reports-generated-in-aws-codebuild)
[docs.aws.amazon.com » View test reports](https://docs.aws.amazon.com/codebuild/latest/userguide/test-view-reports.html)
[docs.aws.amazon.com » Working with reports](https://docs.aws.amazon.com/codebuild/latest/userguide/test-report.html)
[aws.amazon.com » Test Reports with AWS CodeBuild](https://aws.amazon.com/blogs/devops/test-reports-with-aws-codebuild/)

## Create a repository in CodeCommit : repo-d

## Create a codebuild project : repo-d

Use the `buildspec.yaml` file in the root of the project. Make sure you give the right path to the test reports in the `buildspec.yaml` file. The files get generated in the project directory `target/surefire-reports`.

```yaml
reports: # New
  SurefireReports: # CodeBuild will create a report group called "SurefireReports".
    files: # Store all of the files
      - '**/*'
    base-directory: 'target/surefire-reports' # Location of the report
```

Trigger the build manually using the `Start build` button.

## Go to the Report Groups section

You should see the `repo-d-SurefireReports` group.

[aws cli » list-report-groups](https://docs.aws.amazon.com/cli/latest/reference/codebuild/list-report-groups.html)
[stackoverflow » turn off pager](https://stackoverflow.com/questions/60122188/how-to-turn-off-the-pager-for-aws-cli-return-value)

```bash
AWS_PAGER="" aws codebuild list-report-groups
```

Get the reports

```bash
AWS_PAGER="" aws codebuild list-reports-for-report-group --report-group-arn <>
```

================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_007_codebuild_codecommit_test_reports_mvn/buildspec.yaml
================================================

version: 0.2

phases:
  install:
    runtime-versions:
      java: corretto21 # Assuming corretto21 is available, replace with the specific version available in AWS CodeBuild
    commands:
      - echo "Checking Java version..."
      - java -version
      - echo "Checking mvn version..."
      - mvn -v
  build:
    commands:
      - java -version
      - echo Build started on `date`
      - mvn surefire-report:report # Running this task to execute unit tests and generate the report.

reports: # New
  SurefireReports: # CodeBuild will create a report group called "SurefireReports".
    files: # Store all of the files
      - '**/*'
    base-directory: 'target/surefire-reports' # Location of the report

================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_008_trigger_lambda_from_codecommit_using_event_bridge/ReadMe.md
================================================

# Trigger Lambda from Codecommit

[https://docs.aws.amazon.com/codecommit/latest/userguide/how-to-notify-lambda-cc.html](https://docs.aws.amazon.com/codecommit/latest/userguide/how-to-notify-lambda-cc.html)

================================================
FILE: home/cloud_providers/aws/taskset_aws_cloud_providers/task_009_pass_vars_dynamically_from_codebuild_to_codepipeline/ReadMe.md
================================================

# Pass variables dynamically from CodePipeline to CodeBuild

[https://docs.aws.amazon.com/codepipeline/latest/userguide/tutorials-pipeline-variables.html](https://docs.aws.amazon.com/codepipeline/latest/userguide/tutorials-pipeline-variables.html)

```bash
aws codepipeline start-pipeline-execution --name MyVariablesPipeline --variables name=timeout,value=2000
```

================================================
FILE: home/cloud_providers/azure/ReadMe.md
================================================

# taskset_azure_cloud_providers

> [Auto](https://github.com/codeaprendiz/learn_fullstack/blob/main/home/php/intermediate/taskset_intermediate_php/task_004_createGlobalMarkdownTable/generate-readme.php) generated ReadMe.

Number of tasks: 11

| Task | Description |
|----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| task_001 | [task_001_capture_web_app_logs_with_app_service_diagnostics_logging](taskset_azure_cloud_providers/task_001_capture_web_app_logs_with_app_service_diagnostics_logging) |
| task_002 | [task_002_devbox](taskset_azure_cloud_providers/task_002_devbox) |
| task_003 | [task_003_create_a_windows_virtual_machine](taskset_azure_cloud_providers/task_003_create_a_windows_virtual_machine) |
| task_004 | [task_004_connect_to_windows_virtual_machine_via_rdp](taskset_azure_cloud_providers/task_004_connect_to_windows_virtual_machine_via_rdp) |
| task_005 | [task_005_create_a_generalized_image](taskset_azure_cloud_providers/task_005_create_a_generalized_image) |
| task_006 | [task_006_create_a_new_virtual_machine_from_a_managed_image](taskset_azure_cloud_providers/task_006_create_a_new_virtual_machine_from_a_managed_image) |
| task_007 | [task_007_create_an_image_of_azure_vm_from_az_cli_and_provision_a_new_vm](taskset_azure_cloud_providers/task_007_create_an_image_of_azure_vm_from_az_cli_and_provision_a_new_vm) |
| task_008 | [task_008_create_an_azure_virtual_machine](taskset_azure_cloud_providers/task_008_create_an_azure_virtual_machine) |
| task_009 | [task_009_configure_network_access](taskset_azure_cloud_providers/task_009_configure_network_access) |
| task_010 | [task_010_create_a_storage_blob](taskset_azure_cloud_providers/task_010_create_a_storage_blob) |
| task_011 | [task_011_deploy_a_container_app](taskset_azure_cloud_providers/task_011_deploy_a_container_app) |

================================================
FILE: home/cloud_providers/azure/taskset_azure_cloud_providers/task_001_capture_web_app_logs_with_app_service_diagnostics_logging/ReadMe.md
================================================

# Capture Web Application Logs with App Service Diagnostics Logging
- [Learning objectives](#learning-objectives)
- [Exercise - Enable and configure App Service application logging using the Azure portal](#exercise---enable-and-configure-app-service-application-logging-using-the-azure-portal)
- [Use Azure CLI to deploy the webapp](#use-azure-cli-to-deploy-the-webapp)
- [View live application logging with the log streaming service](#view-live-application-logging-with-the-log-streaming-service)
- [View live application logging with the log streaming service using Azure CLI](#view-live-application-logging-with-the-log-streaming-service-using-azure-cli)
- [Retrieve application log files](#retrieve-application-log-files)
- [Exercise - Retrieve Application Log Files using Azure CLI and Kudu](#exercise---retrieve-application-log-files-using-azure-cli-and-kudu)

[learn.microsoft.com » Capture Web Application Logs with App Service Diagnostics Logging](https://learn.microsoft.com/en-us/training/modules/capture-application-logs-app-service/)

## Learning objectives

- Enable app logging on an Azure Web App.
- View live app logging activity with the log streaming service.
- Retrieve app log files from an app with Kudu or the Azure CLI.

### [Exercise - Enable and configure App Service application logging using the Azure portal](https://learn.microsoft.com/en-us/training/modules/capture-application-logs-app-service/3-enable-and-configure-app-service-application-logging-using-the-azure-portal)

#### Use Azure CLI to deploy the webapp

Set the variables

```bash
gitRepo=https://github.com/MicrosoftDocs/mslearn-capture-application-logs-app-service
appName="contosofashions$RANDOM"
appPlan="contosofashionsAppPlan"
resourceGroup=learn-73039af5-5f45-4665-837f-e4523655e6cd
storageAccount=sa$appName
appLocation=southeastasia
```

Deploy the webapp

```bash
az appservice plan create --name $appPlan --resource-group $resourceGroup --location $appLocation --sku FREE
az webapp create --name $appName --resource-group $resourceGroup --plan $appPlan --deployment-source-url $gitRepo
```

Create Storage Account

```bash
az storage account create -n $storageAccount -g $resourceGroup -l $appLocation --sku Standard_LRS
```

#### View live application logging with the log streaming service

To open the log stream, run the following command.

```bash
az webapp log tail --name <app name> --resource-group <resource group>
```

Reset user-level credentials

```bash
az webapp deployment user set --user-name <username> --password <password>
```

After you have created a set of credentials, run the following command to open the log stream. You're then prompted for the password.

```bash
# curl -u {username} https://{sitename}.scm.azurewebsites.net/api/logstream
```

### [View live application logging with the log streaming service using Azure CLI](https://learn.microsoft.com/en-us/training/modules/capture-application-logs-app-service/5-view-live-application-logging-activity-with-the-log-streaming-service-using-azure-cli)

Use Azure CLI to view the live log stream

```bash
# az webapp log tail --resource-group learn-73039af5-5f45-4665-837f-e4523655e6cd --name contosofashions
az webapp log tail --resource-group learn-73039af5-5f45-4665-837f-e4523655e6cd --name contosofashions10908
2023-12-06T08:34:47  Welcome, you are now connected to log-streaming service. The default timeout is 2 hours. Change the timeout with the App Setting SCM_LOGSTREAM_TIMEOUT (in seconds).
2023-12-06T08:35:16  PID[8672] Error  Error message, in the Page_Load method for About.aspx
2023-12-06T08:35:34  PID[8672] Error  Error message, in the Page_Load method for Default.aspx
# expected logs
```

### Retrieve application log files

- To download file system log files using the Azure CLI,

```bash
az webapp log download --log-file <filename>.zip --resource-group <resource group name> --name <app name>
```

### [Exercise - Retrieve Application Log Files using Azure CLI and Kudu](https://learn.microsoft.com/en-us/training/modules/capture-application-logs-app-service/7-retrieve-application-log-files-from-an-application-using-azure-cli-and-kudu)

- In Cloud Shell, to download the logs to contosofashions.zip in the cloud share storage,

```bash
# az webapp log download --log-file contosofashions.zip --resource-group learn-73039af5-5f45-4665-837f-e4523655e6cd --name contosofashions
az webapp log download --log-file contosofashions.zip --resource-group learn-73039af5-5f45-4665-837f-e4523655e6cd --name contosofashions10908
```

- In Cloud Shell, to show the log files contained in the downloaded contosofashions.zip file, run the following command.

```bash
zipinfo -1 contosofashions.zip
```

- In Cloud Shell, to extract just the app log file from the downloaded contosofashions.zip file, run the following command.

```bash
unzip -j contosofashions.zip LogFiles/Application/*.txt
```

- In Cloud Shell, to display the application log file, run the following command.

```bash
code *.txt
```

================================================
FILE: home/cloud_providers/azure/taskset_azure_cloud_providers/task_002_devbox/ReadMe.md
================================================
# Devbox

- [Devbox](#devbox)
  - [What is Microsoft Dev Box?](#what-is-microsoft-dev-box)
  - [Key concepts for Microsoft Dev Box](#key-concepts-for-microsoft-dev-box)
  - [Quickstart: Configure Microsoft Dev Box](#quickstart-configure-microsoft-dev-box)
    - [Create a dev center](#create-a-dev-center)
    - [Create a project](#create-a-project)
    - [Create a dev box definition](#create-a-dev-box-definition)
    - [Create a dev box pool](#create-a-dev-box-pool)
    - [Provide access to a dev box project](#provide-access-to-a-dev-box-project)
  - [Quickstart: Create and connect to a dev box by using the Microsoft Dev Box developer portal](#quickstart-create-and-connect-to-a-dev-box-by-using-the-microsoft-dev-box-developer-portal)
    - [Create a dev box](#create-a-dev-box)
    - [Connect to a dev box](#connect-to-a-dev-box)
  - [Microsoft Dev Box architecture overview](#microsoft-dev-box-architecture-overview)
    - [How does Microsoft Dev Box work?](#how-does-microsoft-dev-box-work)
    - [Microsoft Dev Box architecture](#microsoft-dev-box-architecture)
    - [Network connectivity](#network-connectivity)
    - [Microsoft Intune integration](#microsoft-intune-integration)
    - [Identity services](#identity-services)

## [What is Microsoft Dev Box?](https://learn.microsoft.com/en-us/azure/dev-box/overview-what-is-microsoft-dev-box)

- gives developers self-service access to ready-to-code cloud workstations called dev boxes
- configure dev boxes with tools, source code, and prebuilt binaries
- create your own customized image, or use a preconfigured image from Azure Marketplace
- was designed with three organizational roles in mind: platform engineers, development team leads, and developers

> A dev box is a virtual machine (VM) preconfigured with the tools and resources the developer needs for a project.
![img](https://learn.microsoft.com/en-us/azure/dev-box/media/overview-what-is-microsoft-dev-box/dev-box-roles.png#lightbox) ## [Key concepts for Microsoft Dev Box](https://learn.microsoft.com/en-us/azure/dev-box/concept-dev-box-concepts) ## [Quickstart: Configure Microsoft Dev Box](https://learn.microsoft.com/en-us/azure/dev-box/quickstart-configure-dev-box-service) Two phases - platform engineers configure the necessary Microsoft Dev Box resources through the Azure portal - users can proceed to the next phase, creating and managing their dev boxes through the developer portal steps required to configure Microsoft Dev Box in the Azure portal. ![img](https://learn.microsoft.com/en-us/azure/dev-box/media/quickstart-configure-dev-box-service/dev-box-build-stages.png#lightbox) ### [Create a dev center](https://learn.microsoft.com/en-us/azure/dev-box/quickstart-configure-dev-box-service#create-a-dev-center) provides a centralized place to manage a - collection of projects, - the configuration of available dev box images and sizes, and - the networking settings to enable access to organizational resources ### [Create a project](https://learn.microsoft.com/en-us/azure/dev-box/quickstart-configure-dev-box-service#create-a-project) - Dev box projects enable you to manage team-level settings - These settings include providing access to development teams so developers can create dev boxes. ### [Create a dev box definition](https://learn.microsoft.com/en-us/azure/dev-box/quickstart-configure-dev-box-service#create-a-dev-box-definition) - A dev box definition defines the VM image and the VM SKU (compute size + storage) that are used in the creation of the dev boxes. - The dev box definitions you create in a dev center are available for all projects associated with that dev center ### [Create a dev box pool](https://learn.microsoft.com/en-us/azure/dev-box/quickstart-configure-dev-box-service#create-a-dev-box-pool) - A dev box pool is the collection of dev boxes that have the same settings, such as the dev box definition and network connection. - Developers that have access to the project in the dev center, can then choose to create a dev box from a dev box pool. - Dev box pools define the location of the dev boxes through the specified network connection ### [Provide access to a dev box project](https://learn.microsoft.com/en-us/azure/dev-box/quickstart-configure-dev-box-service#provide-access-to-a-dev-box-project) - you must provide access for users through role assignments - Dev Box User role enables dev box users to create, manage, and delete their own dev boxes - You grant access for the user at the level of the project. ## [Quickstart: Create and connect to a dev box by using the Microsoft Dev Box developer portal](https://learn.microsoft.com/en-us/azure/dev-box/quickstart-create-dev-box) ### [Create a dev box](https://learn.microsoft.com/en-us/azure/dev-box/quickstart-create-dev-box#create-a-dev-box) ### [Connect to a dev box](https://learn.microsoft.com/en-us/azure/dev-box/quickstart-create-dev-box#connect-to-a-dev-box) ## [Microsoft Dev Box architecture overview](https://learn.microsoft.com/en-us/azure/dev-box/concept-dev-box-architecture) ### [How does Microsoft Dev Box work?](https://learn.microsoft.com/en-us/azure/dev-box/concept-dev-box-architecture) The following diagram gives an overview of the relationship between the different components in Microsoft Dev Box. 
![img](https://learn.microsoft.com/en-us/azure/dev-box/media/concept-dev-box-architecture/dev-box-concepts-overview.png#lightbox) - dev center - dev center is the top-level resource for Microsoft Dev Box - dev center contains the collection of projects and the shared resources for these projects, such as dev box definitions and network connections - project - A dev box project is the point of access for development teams. - You assign a developer the Dev Box User role to a project to grant the developer permissions to create dev boxes. - dev box definition - dev box definition specifies the configuration of the dev boxes, such as the virtual machine image and compute resources for the dev boxes. - can either choose a VM image from the Azure Marketplace, or use an Azure compute gallery to use custom VM images. - dev box pools - project contains the collection of dev box pools - dev box pool specifies the configuration for dev boxes, such as the dev box definition, the network connection, and other settings - The network connection that is associated with a dev box pool determines where the dev box is hosted - dev box - Developers can create a dev box from a dev box pool by using the developer portal. ### [Microsoft Dev Box architecture](https://learn.microsoft.com/en-us/azure/dev-box/concept-dev-box-architecture#microsoft-dev-box-architecture) The following diagrams show the logical architecture of Microsoft Dev Box. ![img](https://learn.microsoft.com/en-us/azure/dev-box/media/concept-dev-box-architecture/dev-box-architecture-diagram.png#lightbox) For the network connection, you can also choose between a Microsoft-hosted network connection, and an Azure network connection that you create in your own subscription ### [Network connectivity](https://learn.microsoft.com/en-us/azure/dev-box/concept-dev-box-architecture#network-connectivity) - Network connections control where dev boxes are created and hosted, and enable you to connect to other Azure or corporate resources. - Depending on your level of control, you can use Microsoft-hosted network connections or bring your own Azure network connections. ### [Microsoft Intune integration](https://learn.microsoft.com/en-us/azure/dev-box/concept-dev-box-architecture#microsoft-intune-integration) - Microsoft Intune is used to manage your dev boxes. - Every Dev Box user needs one Microsoft Intune license and can create multiple dev boxes. ### [Identity services](https://learn.microsoft.com/en-us/azure/dev-box/concept-dev-box-architecture#identity-services) Microsoft Dev Box uses Microsoft Entra ID and, optionally, on-premises Active Directory Domain Services (AD DS). 
================================================ FILE: home/cloud_providers/azure/taskset_azure_cloud_providers/task_003_create_a_windows_virtual_machine/ReadMe.md ================================================ # [Create a Windows virtual machine](https://learn.microsoft.com/en-us/training/modules/create-windows-virtual-machine-in-azure/3-exercise-create-a-vm) [Training Module » Create a Windows virtual machine in Azure](https://learn.microsoft.com/en-us/training/modules/create-windows-virtual-machine-in-azure/) ================================================ FILE: home/cloud_providers/azure/taskset_azure_cloud_providers/task_004_connect_to_windows_virtual_machine_via_rdp/ReadMe.md ================================================ # [Connect to a Windows virtual machine using RDP](https://learn.microsoft.com/en-us/training/modules/create-windows-virtual-machine-in-azure/5-exercise-connect-to-a-windows-vm-using-rdp) [Training Module » Create a Windows virtual machine in Azure](https://learn.microsoft.com/en-us/training/modules/create-windows-virtual-machine-in-azure/) To connect to an Azure VM with an RDP client, you'll need: - Public IP address of the VM (or private if the VM is configured to connect to your network) - Port number ================================================ FILE: home/cloud_providers/azure/taskset_azure_cloud_providers/task_005_create_a_generalized_image/ReadMe.md ================================================ # [Create a generalized image](https://learn.microsoft.com/en-us/training/modules/customize-windows-server-iaas-virtual-machine-images/2-create-generalized-image) [Training Module » Customize Windows Server IaaS Virtual Machine images](https://learn.microsoft.com/en-us/training/modules/customize-windows-server-iaas-virtual-machine-images/) [Video Reference : Create a managed image of a generalized virtual machine in Azure](https://learn.microsoft.com/en-us/training/modules/customize-windows-server-iaas-virtual-machine-images/4-create-managed-image-generalized-virtual-machine-azure) [what's the difference between deallocated and stopped ?](https://learn.microsoft.com/en-us/answers/questions/574969/whats-the-difference-between-deallocated-and-stopp) ## What are VM images? - When you create a VM, you must specify a VM image that contains a generalized operating system and optionally, other preconfigured software. - Azure uses the image to create a new virtual hard disk (VHD) from which it can start your VM ## What is a generalized image? - After you create a VM and customize it by configuring and installing additional applications according to your requirements, you can save it as a new image. - The new image will be a set of VHDs from which you can create additional VMs. - However, you need to clean up the image first, because when you create a VM the operating system data is updated with several items, including: - The host name of your VM. - The administrator username and credentials. - Log files. - Security identifiers for various operating system services. These items must be reset to their default settings before you capture an image. When you reset these items in a VM, you generalize the VM. ## Generalize a VM - Use the Sysprep.exe tool to generalize a Windows VM ![img](https://learn.microsoft.com/en-us/training/wwl-azure/customize-windows-server-iaas-virtual-machine-images/media/m6-system-preparation.png) - After the VM has been shut down, you should deallocate it while it's in this clean state. 
> [!NOTE]
> The VM might display a state of Stopped, but it isn't deallocated.

If you're using the Azure CLI, run the following command instead:

```bash
az vm deallocate \
  --resource-group <resource group> \
  --name <vm name>
```

> [!TIP]
> When you use the Azure portal to create an image from a VM, it automatically deallocates the VM.

> [!IMPORTANT]
> Keep in mind that you continue to pay for compute resources if your VM is stopped but not deallocated.

================================================
FILE: home/cloud_providers/azure/taskset_azure_cloud_providers/task_006_create_a_new_virtual_machine_from_a_managed_image/ReadMe.md
================================================
# [Create a new Virtual Machine from a managed image](https://learn.microsoft.com/en-us/training/modules/customize-windows-server-iaas-virtual-machine-images/3-create-new-virtual-machine-managed-image)

[Video Reference - Create a Virtual Machine from a managed image](https://learn.microsoft.com/en-us/training/modules/customize-windows-server-iaas-virtual-machine-images/5-demonstration-create-virtual-machine-managed-image)

After you have generalized the VM, you can create a managed image. You can then create new VMs from this managed image.

> [!CAUTION]
> Capturing a VM image from a VM will make the VM unusable. Furthermore, this action can't be undone.

## Create a managed image from a generalized VM

The managed image you create will include all of the disks associated with the generalized VM.

> [!NOTE]
> The VM must be in the stopped (deallocated) state before you create the image.

```bash
az image create \
  --name <image name> \
  --resource-group <resource group> \
  --source <vm name>
```

## Create a new VM from a managed image

To create a new VM using the Azure CLI, use the following command:

```bash
az vm create \
  --resource-group <resource group> \
  --name <vm name> \
  --image <image name> \
  --location <location>
```

If you review your list of VMs in the Azure portal after creating your new VM from your managed image, you'll notice that the new VM displays its source as Image.
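Before creating VMs from it, you can confirm that the managed image exists from the CLI; a minimal sketch (the names are placeholders):

```bash
# List managed images in the resource group
az image list --resource-group <resource group> --output table

# Inspect a single image, including its source VM and OS disk details
az image show --resource-group <resource group> --name <image name>
```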
================================================
FILE: home/cloud_providers/azure/taskset_azure_cloud_providers/task_007_create_an_image_of_azure_vm_from_az_cli_and_provision_a_new_vm/ReadMe.md
================================================
# [Exercise - Create an image of an Azure VM from the Azure CLI and provision a new VM](https://learn.microsoft.com/en-us/training/modules/deploy-vms-from-vhd-templates/4-exercise-create-image-provision-vm?pivots=windows-cloud)

## Set your default resource group

```bash
az configure --defaults group="learn-fcd3cc98-3fab-45d5-b679-f2ddfaf86f6c"
```

## Create a virtual machine

Create

```bash
az vm create --name MyWindowsVM --image Win2019Datacenter --admin-username azureuser
```

Install IIS and set up a default web page:

```bash
az vm extension set --name CustomScriptExtension --vm-name MyWindowsVM --publisher Microsoft.Compute --settings '{"commandToExecute":"powershell Add-WindowsFeature Web-Server; Add-Content -Path \"C:\\inetpub\\wwwroot\\Default.htm\" -Value $(hostname)"}'
```

Open port 80 to the web server:

```bash
az vm open-port --name MyWindowsVM --port 80
```

Get the public IP

```bash
echo http://$(az vm list-ip-addresses --name MyWindowsVM --query "[].virtualMachine.network.publicIpAddresses[*].ipAddress" --output tsv)
```

## Generalize the virtual machine

![img](https://learn.microsoft.com/en-us/training/modules/deploy-vms-from-vhd-templates/media/4-sysprep.png)

Deallocate the virtual machine

```bash
az vm deallocate --name MyWindowsVM
```

Generalize the virtual machine:

```bash
az vm generalize --name MyWindowsVM
```

## Create a virtual machine image

```bash
az image create --name MyVMImage --source MyWindowsVM
```

## Create a virtual machine by using the new image

Create a new virtual machine from the image:

```bash
az vm create --name MyVMFromImage --computer-name MyVMFromImage --image MyVMImage --admin-username azureuser
```

Update the default web page with the server name:

```bash
az vm extension set --name CustomScriptExtension --vm-name MyVMFromImage --publisher Microsoft.Compute --settings '{"commandToExecute":"powershell Clear-Content -Path \"C:\\inetpub\\wwwroot\\Default.htm\"; Add-Content -Path \"C:\\inetpub\\wwwroot\\Default.htm\" -Value $(hostname)"}'
```

Open port 80 to the web server:

```bash
az vm open-port --name MyVMFromImage --port 80
```

Get the public IP

```bash
echo http://$(az vm list-ip-addresses --name MyVMFromImage --query "[].virtualMachine.network.publicIpAddresses[*].ipAddress" --output tsv)
```

================================================
FILE: home/cloud_providers/azure/taskset_azure_cloud_providers/task_008_create_an_azure_virtual_machine/ReadMe.md
================================================
# [Create an Azure virtual machine](https://learn.microsoft.com/en-us/training/modules/describe-azure-compute-networking-services/3-exercise-create-azure-virtual-machine)

Run the following az vm create command to create a Linux VM:

```bash
az vm create --resource-group "" --name my-vm --public-ip-sku Standard --image Ubuntu2204 --admin-username azureuser --generate-ssh-keys
```

Run the following az vm extension set command to configure Nginx on your VM:

```bash
az vm extension set --resource-group "" --vm-name my-vm --name customScript --publisher Microsoft.Azure.Extensions --version 2.1 --settings '{"fileUris":["https://raw.githubusercontent.com/MicrosoftDocs/mslearn-welcome-to-azure/master/configure-nginx.sh"]}' --protected-settings '{"commandToExecute": "./configure-nginx.sh"}'
```
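To confirm the custom script extension actually ran, you can query its provisioning state; a minimal sketch (the empty resource-group value follows the exercise's convention of pasting the sandbox group name between the quotes):

```bash
# The ProvisioningState column should read Succeeded once Nginx is configured
az vm extension list --resource-group "" --vm-name my-vm --output table
```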
================================================ FILE: home/cloud_providers/azure/taskset_azure_cloud_providers/task_009_configure_network_access/ReadMe.md ================================================ # [Configure Network Access](https://learn.microsoft.com/en-us/training/modules/describe-azure-compute-networking-services/9-exercise-configure-network-access) > [!NOTE] > you can associate same NSG for multiple network interfaces and/or subnets > [!TIP] > A network interface (NIC) enables an Azure virtual machine (VM) to communicate with internet, Azure, and on-premises resources. [Create, change, or delete a network interface](https://learn.microsoft.com/en-us/azure/virtual-network/virtual-network-network-interface?tabs=azure-portal) [Network security groups](https://learn.microsoft.com/en-us/azure/virtual-network/network-security-groups-overview) [NSG-Can i assigned 1 NSG to multiple Instance , what is the limit](https://learn.microsoft.com/en-us/answers/questions/1353503/nsg-can-i-assigned-1-nsg-to-multiple-instance-what) ## Access your web server List VMs ```bash az vm list ``` Get VM IP ```bash IPADDRESS="$(az vm list-ip-addresses --resource-group "" --name my-vm --query "[].virtualMachine.network.publicIpAddresses[*].ipAddress" --output tsv)" ``` curl to download homepage ```bash curl --connect-timeout 5 http://$IPADDRESS ``` Output ```bash curl: (28) Failed to connect to 13.64.199.115 port 80 after 5002 ms: Timeout was reached ``` ## List the current network security group rules Run the following az network nsg list command to list the network security groups that are associated with your VM: ```bash az network nsg list --resource-group "" --query '[].name' --output tsv ``` Output ```bash my-vmNSG ``` Run the following az network nsg rule list command to list the rules associated with the NSG named my-vmNSG: ```bash az network nsg rule list --resource-group "" --nsg-name my-vmNSG ``` ## Create the network security rule Run the following az network nsg rule create command to create a rule called allow-http that allows inbound access on port 80: ```bash az network nsg rule create --resource-group "" --nsg-name my-vmNSG --name allow-http --protocol tcp --priority 100 --destination-port-range 80 --access Allow ``` Validate ```bash az network nsg rule list --resource-group "" --nsg-name my-vmNSG --query '[].{Name:name, Priority:priority, Port:destinationPortRange, Access:access}' --output table ``` Output ```bash Name Priority Port Access ----------------- ---------- ------ -------- default-allow-ssh 1000 22 Allow allow-http 100 80 Allow ``` ## Access your web server again ```bash curl --connect-timeout 5 http://$IPADDRESS ``` Output ```bash

Welcome to Azure! My name is my-vm.

```

## Resources created in the resource group

```bash
az resource list --resource-group "" --query "[].type" -o tsv
Microsoft.Storage/storageAccounts
Microsoft.Network/publicIPAddresses
Microsoft.Network/networkSecurityGroups
Microsoft.Network/virtualNetworks
Microsoft.Network/networkInterfaces
Microsoft.Compute/virtualMachines
Microsoft.Compute/disks
Microsoft.Compute/virtualMachines/extensions
```

================================================
FILE: home/cloud_providers/azure/taskset_azure_cloud_providers/task_010_create_a_storage_blob/ReadMe.md
================================================
# [Create a storage blob](https://learn.microsoft.com/en-us/training/modules/describe-azure-storage-services/5-exercise-create-storage-blob)

## Create a storage account

## Work with blob storage

## Change the access level of your blob

================================================
FILE: home/cloud_providers/azure/taskset_azure_cloud_providers/task_011_deploy_a_container_app/ReadMe.md
================================================
# [Exercise - Deploy a container app](https://learn.microsoft.com/en-us/training/modules/implement-azure-container-apps/3-exercise-deploy-app)

## Prepare your environment

Install the Azure Container Apps extension for the CLI.

```bash
az extension add --name containerapp --upgrade
```

Register the Microsoft.App namespace.

```bash
az provider register --namespace Microsoft.App
```

Register the Microsoft.OperationalInsights provider for the Azure Monitor Log Analytics workspace

```bash
az provider register --namespace Microsoft.OperationalInsights
```

Set the environment variables used later in this exercise. Replace the location value with a region near you.

```bash
RANDOM=22388202393912
myRG=az204-appcont-rg
myLocation=eastus
myAppContEnv=az204-env-$RANDOM
```

Create the resource group for your container app.

```bash
az group create \
  --name $myRG \
  --location $myLocation
```

## Create an environment

```bash
az containerapp env create \
  --name $myAppContEnv \
  --resource-group $myRG \
  --location $myLocation
```

## Create a container app

```bash
az containerapp create \
  --name my-container-app \
  --resource-group $myRG \
  --environment $myAppContEnv \
  --image mcr.microsoft.com/azuredocs/containerapps-helloworld:latest \
  --target-port 80 \
  --ingress 'external' \
  --query properties.configuration.ingress.fqdn
```

## Verify deployment

Select the link returned by the az containerapp create command to verify the container app is running.
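If the FQDN scrolled past, it can be queried again and tested with curl; a minimal sketch:

```bash
# Re-query the ingress FQDN of the container app and fetch the hello-world page
FQDN=$(az containerapp show --name my-container-app --resource-group $myRG --query properties.configuration.ingress.fqdn -o tsv)
curl https://$FQDN
```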
## Clean up resources ```bash az group delete --name $myRG ``` ================================================ FILE: home/cloud_providers/gcp/ReadMe-static.md ================================================ # GCP Taskset References - [docs/regions-zones](https://cloud.google.com/compute/docs/regions-zones) - [https://cloud.google.com/about/locations](https://cloud.google.com/about/locations) - [docs/creating-managing-projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects) - [docs/grant-role-console](https://cloud.google.com/iam/docs/grant-role-console) - [shell/docs](https://cloud.google.com/shell/docs) - [architecture/identity](https://cloud.google.com/architecture/identity) - [docs/cloud-platform-resource-hierarchy](https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy) - [https://cloud.google.com/products/calculator](https://cloud.google.com/products/calculator) - [service-accounts](https://www.cloudskillsboost.google/course_sessions/2028816/video/343131) - [cloud identify](https://www.cloudskillsboost.google/course_sessions/2028816/video/343132) ================================================ FILE: home/cloud_providers/gcp/ReadMe.md ================================================ # taskset_gcp_cloud_providers > [Auto](https://github.com/codeaprendiz/learn_fullstack/blob/main/home/php/intermediate/taskset_intermediate_php/task_004_createGlobalMarkdownTable/generate-readme.php) generated ReadMe. Number of tasks: 42 | Task | Description | |----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | task_001 | [task_001_create_budget__and_alerts](taskset_gcp_cloud_providers/task_001_create_budget__and_alerts) | | task_001 | [task_001_intro_console_projects_iam_apis](taskset_gcp_cloud_providers/task_001_intro_console_projects_iam_apis) | | task_002 | [task_002_getting_started_with_cloud_market_place](taskset_gcp_cloud_providers/task_002_getting_started_with_cloud_market_place) | | task_002 | [task_002_getting_started_with_cloud_market_place__lamp_stack](taskset_gcp_cloud_providers/task_002_getting_started_with_cloud_market_place__lamp_stack) | | task_003 | [task_003_vpc_networking_and_google_compute_engine](taskset_gcp_cloud_providers/task_003_vpc_networking_and_google_compute_engine) | | task_004 | [task_004_getting_started_with_cloud_storage_and_cloud_sql__php](taskset_gcp_cloud_providers/task_004_getting_started_with_cloud_storage_and_cloud_sql__php) | | task_005 | [task_005_getting_started_with_gke__nginx](taskset_gcp_cloud_providers/task_005_getting_started_with_gke__nginx) | | task_006 | [task_006_hello_cloud_run__node](taskset_gcp_cloud_providers/task_006_hello_cloud_run__node) | | task_007 | [task_007_building_a_devops_pipeline__python](taskset_gcp_cloud_providers/task_007_building_a_devops_pipeline__python) | | task_008 | [task_008_deploying_app_to_app_engine_and_gke_and_cloudrun](taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun) | | task_008 | [task_008_deploying_app_to_app_engine_and_gke_and_cloudrun__python](taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun__python) | | task_009 | [task_009_monitoring_applications_in_gcp__python](taskset_gcp_cloud_providers/task_009_monitoring_applications_in_gcp__python) | | task_010 | 
[task_010_alerting_in_google_cloud](taskset_gcp_cloud_providers/task_010_alerting_in_google_cloud) | | task_010 | [task_010_alerting_in_google_cloud__python](taskset_gcp_cloud_providers/task_010_alerting_in_google_cloud__python) | | task_011 | [task_011_service_monitoring__node](taskset_gcp_cloud_providers/task_011_service_monitoring__node) | | task_011 | [task_011_service_monitoring__node_app](taskset_gcp_cloud_providers/task_011_service_monitoring__node_app) | | task_012 | [task_012_monitoring_and_dashboarding_multiple_projects_from_a_single_workspace](taskset_gcp_cloud_providers/task_012_monitoring_and_dashboarding_multiple_projects_from_a_single_workspace) | | task_012 | [task_012_monitoring_and_dashboarding_multiple_projects_from_a_single_workspace__nginx](taskset_gcp_cloud_providers/task_012_monitoring_and_dashboarding_multiple_projects_from_a_single_workspace__nginx) | | task_013 | [task_013_compute_logging_and_monitoring](taskset_gcp_cloud_providers/task_013_compute_logging_and_monitoring) | | task_014 | [task_014_log_analysis](taskset_gcp_cloud_providers/task_014_log_analysis) | | task_014 | [task_014_log_analysis_cloud_run__node](taskset_gcp_cloud_providers/task_014_log_analysis_cloud_run__node) | | task_015 | [task_015_cloud_audit_logs](taskset_gcp_cloud_providers/task_015_cloud_audit_logs) | | task_015 | [task_015_cloud_storage_audit_logs](taskset_gcp_cloud_providers/task_015_cloud_storage_audit_logs) | | task_016 | [task_016_analyzing_network_traffic_with_vpc_flow_logs](taskset_gcp_cloud_providers/task_016_analyzing_network_traffic_with_vpc_flow_logs) | | task_017 | [task_017_application_performance_management](taskset_gcp_cloud_providers/task_017_application_performance_management) | | task_017 | [task_017_application_performance_management__python_and_nodejs](taskset_gcp_cloud_providers/task_017_application_performance_management__python_and_nodejs) | | task_018 | [task_018_2inst_2buckets_2iam](taskset_gcp_cloud_providers/task_018_2inst_2buckets_2iam) | | task_019 | [task_019_working_with_cloud_build](taskset_gcp_cloud_providers/task_019_working_with_cloud_build) | | task_020 | [task_020_deploying_google_kubernetes_engine](taskset_gcp_cloud_providers/task_020_deploying_google_kubernetes_engine) | | task_021 | [task_021_creating_google_kubernetes_engine_deployments](taskset_gcp_cloud_providers/task_021_creating_google_kubernetes_engine_deployments) | | task_022 | [task_022_configuring_persistent_storage_for_google_kubernetes_engine](taskset_gcp_cloud_providers/task_022_configuring_persistent_storage_for_google_kubernetes_engine) | | task_023 | [task_023_anthos_service_mesh_walkthrough](taskset_gcp_cloud_providers/task_023_anthos_service_mesh_walkthrough) | | task_024 | [task_024_observing_anthos_services](taskset_gcp_cloud_providers/task_024_observing_anthos_services) | | task_025 | [task_025_managing_traffic_with_anthos_service_mesh](taskset_gcp_cloud_providers/task_025_managing_traffic_with_anthos_service_mesh) | | task_026 | [task_026_securing_traffic_through_anthos_service_mesh](taskset_gcp_cloud_providers/task_026_securing_traffic_through_anthos_service_mesh) | | task_027 | [task_027_cloud_source_repositories_overview](taskset_gcp_cloud_providers/task_027_cloud_source_repositories_overview) | | task_028 | [task_028_managing_deployments_using_kubernetes_engine](taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine) | | task_029 | 
[task_029_trouble_shooting_workloads_on_gke_for_sre](taskset_gcp_cloud_providers/task_029_trouble_shooting_workloads_on_gke_for_sre) |
| task_030 | [task_030_minimal_nodejs_app_dockerize_google_artifact_registry](taskset_gcp_cloud_providers/task_030_minimal_nodejs_app_dockerize_google_artifact_registry) |
| task_031 | [task_031_hello_node_kubernetes__node](taskset_gcp_cloud_providers/task_031_hello_node_kubernetes__node) |
| task_032 | [task_032_setting_up_jenkins_on_kubernetes_engine](taskset_gcp_cloud_providers/task_032_setting_up_jenkins_on_kubernetes_engine) |
| task_033 | [task_033_continuous_delivery_with_jenkins_in_kubernetes_engine](taskset_gcp_cloud_providers/task_033_continuous_delivery_with_jenkins_in_kubernetes_engine) |

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_001_create_budget__and_alerts/ReadMe.md
================================================
## Create budget and alerts

[billing/docs/how-to/budgets](https://cloud.google.com/billing/docs/how-to/budgets)

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_001_intro_console_projects_iam_apis/ReadMe.md
================================================
# Get Familiar With Console, Projects, Roles And Permissions, APIs And Services

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Searching through the available products and services in the GCP console.
- Accessing projects in the console.
- Accessing roles and permissions in the console, i.e. Identity and Access Management
- Enabling a specific API for any product

**Skills**

- gcp
- gcp-console
- gcp-projects
- gcp-iam
- gcp-apis

> Task : Searching through the available products and services

## Project

- A Google Cloud [project](https://cloud.google.com/docs/overview/#projects) is an organizing entity for your Google Cloud resources. It often contains resources and services.
- Your project has a name, number, and ID

> Task : How do we see all the projects

## Roles and permissions

[IAM Overview](https://cloud.google.com/iam/docs/overview)

[course_sessions/2028816/video/343130](https://www.cloudskillsboost.google/course_sessions/2028816/video/343130)

- Google Cloud also contains a collection of permissions and roles that define who has access to what resources
- You can use the [Cloud IAM](https://cloud.google.com/iam/) console for the same.

> Task : How do we navigate to the Cloud IAM console

## APIs And Services

- When you create your own Google Cloud projects outside of the lab environment, you will have to enable APIs yourself.
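Outside the lab environment, an API can also be enabled from Cloud Shell; a minimal sketch using the Dialogflow API from the task below:

```bash
# Enable the Dialogflow API for the currently configured project
gcloud services enable dialogflow.googleapis.com

# Confirm that it now appears among the enabled services
gcloud services list --enabled --filter="dialogflow"
```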
> Task : Enable the Dialogflow API

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_002_getting_started_with_cloud_market_place/ReadMe.md
================================================
# Getting Started With Cloud Marketplace

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

> Task : We will use the Cloud Marketplace to deploy a LAMP stack

## High Level Tasks

- Search for "LAMP Packaged by Bitnami" in the Marketplace
- Launch
- You should see the status as deployed once it is completed
- Go to the site address
- SSH
- In the created SSH window (the echoed PHP snippet is a reconstruction; the original lab writes a phpinfo page)

```bash
cd /opt/bitnami
sudo sh -c 'echo "<?php phpinfo(); ?>" > apache2/htdocs/phpinfo.php'
```

- Open the `SITE_ADDRESS` again to view your changes

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_002_getting_started_with_cloud_market_place__lamp_stack/ReadMe.md
================================================
# Getting Started With Cloud Marketplace

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- To launch a LAMP stack from the GCP Marketplace
- Validate the changes by going to the public endpoint created

**Skills**

- gcp
- cloud-marketplace

> Task : We will use the Cloud Marketplace to deploy a LAMP stack

## High Level Tasks

- Search for "LAMP Packaged by Bitnami" in the Marketplace
- Launch
- You should see the status as deployed once it is completed
- Go to the site address
- SSH
- In the created SSH window (the echoed PHP snippet is a reconstruction; the original lab writes a phpinfo page)

```bash
cd /opt/bitnami
sudo sh -c 'echo "<?php phpinfo(); ?>" > apache2/htdocs/phpinfo.php'
```

- Open the `SITE_ADDRESS` again to view your changes

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_003_vpc_networking_and_google_compute_engine/ReadMe.md
================================================
# Getting started with VPC Networking and Google Compute Engine

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Explore the default network
- Delete the firewall rules and default network
- Create a VPC network and VM instances
- Explore the connectivity of the VM instances

**Skills**

- gcp
- gcp-networking
- gcp-compute-engine
- gcp-vpc-networking
- gcp-vm-instance
- gcp-ssh

## Explore the default network

- View the subnets
- View the routes
  - Notice that there is a route for each subnet and one for the Default internet gateway (0.0.0.0/0)
- View the firewall rules
  - There are 4 ingress firewall rules for the default network:
    - default-allow-icmp
    - default-allow-rdp
    - default-allow-ssh
    - default-allow-internal
- Note: These firewall rules allow ICMP, RDP, and SSH ingress traffic from anywhere (0.0.0.0/0) and all TCP, UDP, and ICMP traffic within the network (10.128.0.0/9). The Targets, Filters, Protocols/ports, and Action columns explain these rules.
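The same rules can also be inspected from Cloud Shell; a minimal sketch (the filter expression is an assumption about how the network field matches):

```bash
# List the ingress rules attached to the default network
gcloud compute firewall-rules list --filter="network:default"
```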
## Delete the Firewall Rules and Default Network (only for testing, don't do it in production :) )

- Delete the firewall rules
- Delete the default network
- Try to create a VM instance
- Notice the error

## Create a VPC network and VM instances

- Create an auto mode VPC named `mynetwork`
- Create a VM instance in us-central1

| Property | Value |
|----------|---------------|
| Name | mynet-us-vm |
| Region | us-central1 |
| Zone | us-central1-c |

- The internal IP should be 10.128.0.2 because 10.128.0.1 is reserved for the gateway and you have not configured any other instances in that subnet.
- Create a VM instance in europe-central2

| Property | Value |
|----------|-------------------|
| Name | mynet-eu-vm |
| Region | europe-central2 |
| Zone | europe-central2-a |

- The internal IP should be 10.186.0.2 because 10.186.0.1 is reserved for the gateway and you have not configured any other instances in that subnet.

## Explore the connectivity for VM instances

- For mynet-us-vm, click SSH to launch a terminal and connect. (The IPs below are placeholders reconstructed from the notes that follow.)

```bash
ping -c 3 <mynet-eu-vm internal IP>
ping -c 3 <mynet-eu-vm external IP>
```

- Remove the allow-icmp firewall rules

```bash
ping -c 3 <mynet-eu-vm external IP>
# The 100% packet loss indicates that you cannot ping mynet-eu-vm's external IP. This is expected because you deleted the allow-icmp firewall rule!
ping -c 3 <mynet-eu-vm internal IP>
```

- Remove the allow-custom firewall rules

```bash
# Note: The 100% packet loss indicates that you cannot ping mynet-eu-vm's internal IP. This is expected because you deleted the allow-custom firewall rule!
ping -c 3 <mynet-eu-vm internal IP>
```

- Remove the allow-ssh firewall rules
  - Note: The Connection failed message indicates that you cannot SSH to mynet-us-vm because you deleted the allow-ssh firewall rule!

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_004_getting_started_with_cloud_storage_and_cloud_sql__php/ReadMe.md
================================================
# Getting Started with Cloud Storage and Cloud SQL

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Deploy a webserver VM instance
- Create a Cloud Storage bucket using the gsutil command line
- Create a Cloud SQL instance
- Configure the application in the Compute Engine instance to use Cloud SQL
- Configure the application in the Compute Engine instance to use a Cloud Storage object

**Skills**

- gcp
- cloud storage
- cloud sql
- php
- cloud storage object
- vm instance

## Deploy a WebServer VM instance

- Create instance `bloghost` with the `Debian GNU/Linux 11 (bullseye)` image and ensure that you allow `http` traffic
- Add the following startup script

```bash
apt-get update
apt-get install apache2 php php-mysql -y
service apache2 restart
```

## Create a Cloud Storage bucket using the gsutil command line

- Use Cloud Shell for the following

```bash
export LOCATION=US
echo $DEVSHELL_PROJECT_ID

## Create bucket
gsutil mb -l $LOCATION gs://$DEVSHELL_PROJECT_ID

## Copy image from public bucket to your local i.e.
cloudshell gsutil cp gs://cloud-training/gcpfci/my-excellent-blog.png my-excellent-blog.png ## Copy from cloudshell to new bucket gsutil cp my-excellent-blog.png gs://$DEVSHELL_PROJECT_ID/my-excellent-blog.png ## Modify the Access Control List of the object you just created so that it is readable by everyone gsutil acl ch -u allUsers:R gs://$DEVSHELL_PROJECT_ID/my-excellent-blog.png ``` ## Create the Cloud SQL instance - Create SQL instance with db engine as MySQL, instance id as `blog-db`, password as `rootpass` - Use Single Zone - Wait for it to get created - Go to `Users` and add user `blogdbuser` and give password as `blogdbuserpassword` - Go to `Connections`. Give name as `web front end`. For external IP give `bloghostVM_public_ip/32` ## Configure an application in a Compute Engine instance to use Cloud SQL - SSH to bloghost - Run the following ```bash cd /var/www/html sudo vi index.php ``` - and paste content into the file ```php Welcome to my excellent blog

Welcome to my excellent blog

``` - Save and restart ```bash sudo service apache2 restart ``` - Visit `bloghostVM_publicIP/index.php` - Edit the file and add `CLOUDSQLIP` and `DBPASSWORD` - Restart and visit again ## Configure an application in a Compute Engine instance to use a Cloud Storage object - Go to buckets and copy public url of `my-excellent-blog.png` - Add this to index.php ```html ``` - Restart and visit again ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_004_getting_started_with_cloud_storage_and_cloud_sql__php/index.php ================================================ Welcome to my excellent blog

Welcome to my excellent blog

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_005_getting_started_with_gke__nginx/ReadMe.md
================================================
# Getting started with GKE

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Check if the required APIs are enabled
- Start a Kubernetes Engine cluster
- Run and deploy an nginx container
- Expose the deployment via a LoadBalancer
- Validate the changes by going to the external IP created

**Skills**

- gcp
- gke
- kubernetes
- container
- nginx
- loadbalancer
- cloudshell
- external ip

## Check if the APIs are enabled

- Kubernetes Engine API
- Container Registry API

## Start a Kubernetes Engine cluster

- Run the following commands in Cloud Shell

```bash
# Set the zone
export MY_ZONE=us-central1-a

# Create k8s cluster
gcloud container clusters create webfrontend --zone $MY_ZONE --num-nodes 2
```

- Once completed

```bash
kubectl get nodes
```

## Run and deploy a container

```bash
# Deploy nginx container
kubectl create deploy nginx --image=nginx:1.17.10

# Check the pods
kubectl get pods

# Expose the deployment to the internet by creating a LoadBalancer type of Service
kubectl expose deployment nginx --port 80 --type LoadBalancer

# Check the services
kubectl get services
# Note no external IP is created yet.
```

- Once the external IP is created, visit the public IP. You should see the nginx page.

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_006_hello_cloud_run__node/ReadMe.md
================================================
# Hello Cloud Run

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Enable the Cloud Run API, configure the Shell env
- Write a sample minimal node application
- Containerize the app using CloudBuild
- Upload to Container Registry
- Deploy the application to CloudRun

**Skills**

- gcp
- cloudrun
- nodejs
- docker
- cloudbuild
- artifactregistry
- cloudshell
- webpreview
- cloudshelleditor

## Enable the Cloud Run API and configure your Shell environment

- Run the following in Cloud Shell

```bash
gcloud services enable run.googleapis.com

## Set the compute region
gcloud config set compute/region us-central1

## Set the LOCATION ENV variable
LOCATION="us-central1"
```

## Write the sample node application

- Run the following in Cloud Shell

```bash
mkdir helloworld && cd helloworld
touch package.json
```

- package.json

```json
{
  "name": "helloworld",
  "description": "Simple hello world sample in Node",
  "version": "1.0.0",
  "main": "index.js",
  "scripts": {
    "start": "node index.js"
  },
  "dependencies": {
    "express": "^4.17.1"
  }
}
```

- Create file `index.js`

```bash
touch index.js
```

- index.js

```js
const express = require('express');
const app = express();
const port = process.env.PORT || 8080;

app.get('/', (req, res) => {
  const name = process.env.NAME || 'World';
  res.send(`Hello ${name}!`);
});

app.listen(port, () => {
  console.log(`helloworld: listening on port ${port}`);
});
```

## Containerize your app using CloudBuild and upload it to Artifact Registry

- Create a docker file
- Dockerfile

```dockerfile
# Use the official lightweight Node.js 12 image.
# https://hub.docker.com/_/node
FROM node:12-slim

# Create and change to the app directory.
WORKDIR /usr/src/app

# Copy application dependency manifests to the container image.
# A wildcard is used to ensure copying both package.json AND package-lock.json (when available).
# Copying this first prevents re-running npm install on every code change.
COPY package*.json ./

# Install production dependencies.
# If you add a package-lock.json, speed your build by switching to 'npm ci'.
# RUN npm ci --only=production
RUN npm install --only=production

# Copy local code to the container image.
COPY . ./

# Run the web service on container startup.
CMD [ "npm", "start" ]
```

- Let's build the container using `CloudBuild`

```bash
gcloud builds submit --tag gcr.io/$GOOGLE_CLOUD_PROJECT/helloworld

## List the images
gcloud container images list
```

- You can go to `CloudBuild` on the console as well and check
- Run the image locally

```bash
docker run -d -p 8080:8080 gcr.io/$GOOGLE_CLOUD_PROJECT/helloworld
```

- Preview the same in the WebPreview

## Deploy to Cloud Run

- Run the following in Cloud Shell

```bash
# The allow-unauthenticated flag in the command below makes your service publicly accessible.
# When prompted, confirm the service name by pressing Enter
gcloud run deploy --image gcr.io/$GOOGLE_CLOUD_PROJECT/helloworld --allow-unauthenticated --region=$LOCATION
# On success, the command line displays the service URL
```

- You can now visit your deployed container by opening the service URL in any browser window.

## Clean up

```bash
# Delete the helloworld container image
gcloud container images delete gcr.io/$GOOGLE_CLOUD_PROJECT/helloworld

# delete the cloudrun service
gcloud run services delete helloworld --region=us-central1
```

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_006_hello_cloud_run__node/hello-world-node/Dockerfile
================================================
# Use the official lightweight Node.js 12 image.
# https://hub.docker.com/_/node
FROM node:12-slim

# Create and change to the app directory.
WORKDIR /usr/src/app

# Copy application dependency manifests to the container image.
# A wildcard is used to ensure copying both package.json AND package-lock.json (when available).
# Copying this first prevents re-running npm install on every code change.
COPY package*.json ./

# Install production dependencies.
# If you add a package-lock.json, speed your build by switching to 'npm ci'.
# RUN npm ci --only=production
RUN npm install --only=production

# Copy local code to the container image.
COPY . ./

# Run the web service on container startup.
CMD [ "npm", "start" ] ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_006_hello_cloud_run__node/hello-world-node/index.js ================================================ const express = require('express'); const app = express(); const port = process.env.PORT || 8080; app.get('/', (req, res) => { const name = process.env.NAME || 'World'; res.send(`Hello ${name}!`); }); app.listen(port, () => { console.log(`helloworld: listening on port ${port}`); }); ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_006_hello_cloud_run__node/hello-world-node/package.json ================================================ { "name": "helloworld", "description": "Simple hello world sample in Node", "version": "1.0.0", "main": "index.js", "scripts": { "start": "node index.js" }, "dependencies": { "express": "^4.17.1" } } ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_007_building_a_devops_pipeline__python/Dockerfile ================================================ FROM python:3.7 WORKDIR /app COPY . . RUN pip install gunicorn RUN pip install -r requirements.txt ENV PORT=80 CMD exec gunicorn --bind :$PORT --workers 1 --threads 8 main:app ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_007_building_a_devops_pipeline__python/ReadMe.md ================================================ # Building a DevOps Pipeline [https://www.cloudskillsboost.google](https://www.cloudskillsboost.google) [Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths) **High Level Objectives** - Create a Git repository - Create a simple Python application - Test Your web application in Cloud Shell - Define a Docker build - Manage Docker images with Cloud Build and Container Registry - Automate builds with triggers - Test your build changes **Skills** - gcp - devops - python - docker - cloud-build - cloud-shell - cloud-registry ![.images/devops-pipeline.png](.images/devops-pipeline.png) ### Create a Git repository - Use service `Source Repositories` - Name : `devops-repo` - Create - Activate CloudShell - Clone the repo ```bash mkdir gcp-course cd gcp-course gcloud source repos clone devops-repo cd devops-repo ``` ### Create a simple Python application - Create the req files and folders - Run the following ```bash cd ~/gcp-course/devops-repo git add --all git config --global user.email "you@example.com" git config --global user.name "Your Name" git commit -a -m "Initial Commit" git push origin master ``` - Create Dockerfile ### Manage Docker images with Cloud Build and Container Registry ```bash cd ~/gcp-course/devops-repo echo $DEVSHELL_PROJECT_ID gcloud builds submit --tag gcr.io/$DEVSHELL_PROJECT_ID/devops-image:v0.1 . 
``` - Check the CloudBuild and ContainerRegistry now - Let's deploy the container to compute > Container Image : gcr.io//devops-image:v0.1 - Allow http traffic ```bash cd ~/gcp-course/devops-repo git add --all git commit -am "Added Docker Support" git push origin master ``` - Visit the public IP now ### Automate builds with triggers - Go to the CloudBuild - Create Trigger - Select `devops-repo` and `.*(any branch)` - Choose `Dockerfile` for configuration - Create - Manually run the trigger once - Go to history and check the builds - Check the container registry for the new folder `devops-repo` - Make changes in the `main.py` file and commit again. ```bash cd ~/gcp-course/devops-repo git commit -a -m "Testing Build Trigger" git push origin master ``` ### Test your build changes - Check the build history in CloudBuilds and copy the Image link, format should be gcr.io/qwiklabs-gcp-00-f23112/devops-repoxx34345xx. - Create a new compute engine with the new tag and allow http traffic ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_007_building_a_devops_pipeline__python/main.py ================================================ from flask import Flask, render_template, request app = Flask(__name__) @app.route("/") def main(): model = {"title": "Hello DevOps Fans."} return render_template('index.html', model=model) if __name__ == "__main__": app.run(host='0.0.0.0', port=8080, debug=True, threaded=True) ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_007_building_a_devops_pipeline__python/requirements.txt ================================================ Flask==2.0.3 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_007_building_a_devops_pipeline__python/templates/index.html ================================================ {% extends "layout.html" %} {% block content %}

{{model.title}}

{% endblock %} ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_007_building_a_devops_pipeline__python/templates/layout.html ================================================ {{model.title}}
{% block content %}{% endblock %}
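The task_007 objectives mention testing the web application in Cloud Shell before containerizing it; a minimal sketch of that step, assuming the main.py and requirements.txt above:

```bash
# Install the dependency and start the Flask dev server on port 8080
pip3 install -r requirements.txt
python3 main.py
# Then open Cloud Shell's Web Preview on port 8080 to view the page
```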
================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun/Dockerfile ================================================ FROM python:3.7 WORKDIR /app COPY . . RUN pip install gunicorn RUN pip install -r requirements.txt ENV PORT=8080 CMD exec gunicorn --bind :$PORT --workers 1 --threads 8 main:app ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun/ReadMe.md ================================================ # Deploying same Python app to AppEngine, GKE, CloudRun [https://www.cloudskillsboost.google](https://www.cloudskillsboost.google) [Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths) ## Objective ![img.png](.images/arch-objective.png) ```bash docker build -t test-python . docker run --rm -p 8080:8080 test-python ``` - Web Preview ## Deploy to App Engine - Create file app.yaml ```bash # create App Engine application gcloud app create --region=us-central # Deploy the app using gcloud app deploy --version=one --quiet ``` - Navigate to App Engine dashboard - Click on the URL - Make changes in the `main.py` and run the following ```bash # The --no-promote parameter tells App Engine to continue serving requests with the old version gcloud app deploy --version=two --no-promote --quiet ``` - Now visit the URL again. You should see the same version of code. - Go to versions in the console - Click on version 2 link to test it. - Select split traffic and change to version 2 and save - Visit the URL again and refresh ## Deploy the Kubernetes - Create Manual k8s cluster with all defaults set - Connect to the cluster ```bash kubectl get nodes ``` - Make changes in `main.py` and run the following - Create the k8s-manifests.yaml file - Enter the following commands to use Cloud Build to create the image and store it in Container Registry ```bash # Check the image tag created in the output gcloud builds submit --tag gcr.io/$DEVSHELL_PROJECT_ID/devops-image:v0.2 . # Replace this tag in the k8s-manifests.yaml ``` - Apply the changes ```bash kubectl apply -f k8s-manifests.yaml kubectl get pods kubectl get svc ``` - Visit the external IP created by the service ## Deploy to CloudRun - Make changes in the `main.py` file - Trigger build in CloudBuild ```bash gcloud builds submit --tag gcr.io/$DEVSHELL_PROJECT_ID/cloud-run-image:v0.1 . 
## Deploy to Kubernetes

- Manually create a k8s cluster with all defaults set
- Connect to the cluster

```bash
kubectl get nodes
```

- Make changes in `main.py` and run the following
- Create the k8s-manifests.yaml file
- Enter the following commands to use Cloud Build to create the image and store it in Container Registry

```bash
# Check the image tag created in the output
gcloud builds submit --tag gcr.io/$DEVSHELL_PROJECT_ID/devops-image:v0.2 .
# Replace this tag in the k8s-manifests.yaml
```

- Apply the changes

```bash
kubectl apply -f k8s-manifests.yaml
kubectl get pods
kubectl get svc
```

- Visit the external IP created by the service

## Deploy to CloudRun

- Make changes in the `main.py` file
- Trigger a build in Cloud Build

```bash
gcloud builds submit --tag gcr.io/$DEVSHELL_PROJECT_ID/cloud-run-image:v0.1 .
```

- Go to Cloud Run in the console
- Give `Service name` as `hello-cloud-run`
- Autoscaling max 6
- Authentication as `Allow unauthenticated invocations`
- Keep `Container`, `Connections`, `Security` as default
- Create
- Visit the URL created
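The console steps above have a CLI equivalent in `gcloud run deploy`. A sketch; the region and platform flags are assumptions:

```bash
# hypothetical CLI equivalent of the console deployment above
gcloud run deploy hello-cloud-run \
  --image=gcr.io/$DEVSHELL_PROJECT_ID/cloud-run-image:v0.1 \
  --platform=managed \
  --region=us-central1 \
  --allow-unauthenticated \
  --max-instances=6
```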
================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun/app.yaml
================================================
# For AppEngine
runtime: python37

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun/k8s-manifests.yaml
================================================
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: devops-deployment
  labels:
    app: devops
    tier: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: devops
      tier: frontend
  template:
    metadata:
      labels:
        app: devops
        tier: frontend
    spec:
      containers:
      - name: devops-demo
        image:
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: devops-deployment-lb
  labels:
    app: devops
    tier: frontend-lb
spec:
  type: LoadBalancer
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app: devops
    tier: frontend

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun/main.py
================================================
from flask import Flask, render_template, request

app = Flask(__name__)


@app.route("/")
def main():
    model = {"title": "Hello DevOps Fans."}
    return render_template('index.html', model=model)


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=8080, debug=True, threaded=True)

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun/requirements.txt
================================================
Flask==2.0.3

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun/templates/index.html
================================================
{% extends "layout.html" %}
{% block content %}
<div class="jumbotron">
    <div class="container">
        <h1>{{model.title}}</h1>
    </div>
</div>
{% endblock %}

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun/templates/layout.html
================================================
<!doctype html>
<html lang="en">
<head>
    <title>{{model.title}}</title>
</head>
<body>
{% block content %}{% endblock %}
</body>
</html>
================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun__python/Dockerfile
================================================
FROM python:3.7
WORKDIR /app
COPY . .
RUN pip install gunicorn
RUN pip install -r requirements.txt
ENV PORT=8080
CMD exec gunicorn --bind :$PORT --workers 1 --threads 8 main:app

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun__python/ReadMe.md
================================================
# Deploying same Python app to AppEngine, GKE, CloudRun

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Create a minimal python app, dockerize the app
- Deploy to App Engine
- Deploy to Kubernetes Engine
- Deploy to Cloud Run

**Skills**

- gcp
- python
- docker
- kubernetes
- cloud run
- app engine
- cloud build

![img.png](.images/arch-objective.png)

```bash
docker build -t test-python .
docker run --rm -p 8080:8080 test-python
```

- Web Preview

## Deploy to App Engine

- Create file app.yaml

```bash
# create App Engine application
gcloud app create --region=us-central
# Deploy the app using
gcloud app deploy --version=one --quiet
```

- Navigate to App Engine dashboard
- Click on the URL
- Make changes in the `main.py` and run the following

```bash
# The --no-promote parameter tells App Engine to continue serving requests with the old version
gcloud app deploy --version=two --no-promote --quiet
```

- Now visit the URL again. You should see the same version of the code.
- Go to versions in the console
- Click on the version 2 link to test it.
- Select split traffic, change it to version 2, and save
- Visit the URL again and refresh

## Deploy to Kubernetes

- Manually create a k8s cluster with all defaults set
- Connect to the cluster

```bash
kubectl get nodes
```

- Make changes in `main.py` and run the following
- Create the k8s-manifests.yaml file
- Enter the following commands to use Cloud Build to create the image and store it in Container Registry

```bash
# Check the image tag created in the output
gcloud builds submit --tag gcr.io/$DEVSHELL_PROJECT_ID/devops-image:v0.2 .
# Replace this tag in the k8s-manifests.yaml
```
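One way to splice the fresh tag into the manifest without editing it by hand; a sketch that assumes the `image:` line is currently empty or holds an older tag:

```bash
# replace the image line in k8s-manifests.yaml with the new tag
sed -i "s|image:.*|image: gcr.io/$DEVSHELL_PROJECT_ID/devops-image:v0.2|" \
  k8s-manifests.yaml
```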
- Apply the changes

```bash
kubectl apply -f k8s-manifests.yaml
kubectl get pods
kubectl get svc
```

- Visit the external IP created by the service

## Deploy to CloudRun

- Make changes in the `main.py` file
- Trigger a build in Cloud Build

```bash
gcloud builds submit --tag gcr.io/$DEVSHELL_PROJECT_ID/cloud-run-image:v0.1 .
```

- Go to Cloud Run in the console
- Give `Service name` as `hello-cloud-run`
- Autoscaling max 6
- Authentication as `Allow unauthenticated invocations`
- Keep `Container`, `Connections`, `Security` as default
- Create
- Visit the URL created

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun__python/app.yaml
================================================
# For AppEngine
runtime: python37

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun__python/k8s-manifests.yaml
================================================
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: devops-deployment
  labels:
    app: devops
    tier: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: devops
      tier: frontend
  template:
    metadata:
      labels:
        app: devops
        tier: frontend
    spec:
      containers:
      - name: devops-demo
        image:
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: devops-deployment-lb
  labels:
    app: devops
    tier: frontend-lb
spec:
  type: LoadBalancer
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app: devops
    tier: frontend

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun__python/main.py
================================================
from flask import Flask, render_template, request

app = Flask(__name__)


@app.route("/")
def main():
    model = {"title": "Hello DevOps Fans."}
    return render_template('index.html', model=model)


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=8080, debug=True, threaded=True)

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun__python/requirements.txt
================================================
Flask==2.0.3
================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun__python/templates/index.html
================================================
{% extends "layout.html" %}
{% block content %}
<div class="jumbotron">
    <div class="container">
        <h1>{{model.title}}</h1>
    </div>
</div>
{% endblock %}

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_008_deploying_app_to_app_engine_and_gke_and_cloudrun__python/templates/layout.html
================================================
<!doctype html>
<html lang="en">
<head>
    <title>{{model.title}}</title>
</head>
<body>
{% block content %}{% endblock %}
</body>
</html>
================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_009_monitoring_applications_in_gcp__python/Dockerfile
================================================
FROM python:3.7
WORKDIR /app
COPY . .
RUN pip install gunicorn
RUN pip install -r requirements.txt
ENV PORT=8080
CMD exec gunicorn --bind :$PORT --workers 1 --threads 8 main:app

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_009_monitoring_applications_in_gcp__python/ReadMe.md
================================================
# Monitoring Applications in GCP

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Deploy and test the python app locally on gcp console
- Deploy an application to App Engine and examine the Cloud logs
- View the Profiler Information
- Explore Cloud Trace
- Monitor resources using Dashboards
- Create uptime checks and alerts

**Skills**

- gcp
- python
- docker
- app engine
- cloud build
- cloud profiler
- cloud trace
- cloud monitoring
- cloud logging
- cloud uptime checks
- cloud alerting
- cloud dashboards

### Deploy and test the python app locally on gcp console

- Run the following in google cloud shell

```bash
# confirm that you are authenticated
gcloud auth list
# confirm that you are using the correct project for this lab
gcloud config list project
```

- Create folder

```bash
mkdir gcp-logging
cd gcp-logging
# Create the required files over here
```

- Enable API

```bash
# Profiler has to be enabled in the project
gcloud services enable cloudprofiler.googleapis.com
```

- Build and test locally

```bash
docker build -t test-python .
docker run --rm -p 8080:8080 test-python
# Check the web Preview now
```

### Deploy an application to App Engine and examine the Cloud logs

```bash
# Create app engine
gcloud app create --region=us-central
# deploy your app
gcloud app deploy --version=one --quiet
```

- Check the App Engine URL
- Go to `Tools` and check the `Logs`. Logs should indicate that Profiler has started and profiles are being generated

### View the Profiler Information

- Go to `Profiler`. Check the current insights.
- Start a compute instance in any region other than `us-central1` (a CLI sketch follows)
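The load-generator VM can also be created from the CLI; a minimal sketch where the name, zone, and machine type are assumptions:

```bash
# hypothetical load-generator VM outside us-central1
gcloud compute instances create ab-loadgen \
  --zone=europe-west1-b \
  --machine-type=e2-small
```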
- SSH

```bash
sudo apt update
sudo apt install apache2-utils -y
nohup ab -n 1000 -c 10 https://.appspot.com/ > nohup1.out &
nohup ab -n 1000 -c 10 https://.appspot.com/ > nohup2.out &
nohup ab -n 1000 -c 10 https://.appspot.com/ > nohup3.out &
```

- Now go back to `Profiler` and check again

### Explore Cloud Trace

- Go to `Trace`
- SSH

```bash
nohup ab -n 1000 -c 10 https://.appspot.com/ > nohup4.out &
nohup ab -n 1000 -c 10 https://.appspot.com/ > nohup5.out &
nohup ab -n 1000 -c 10 https://.appspot.com/ > nohup6.out &
```

### Monitor resources using Dashboards

- Go to `Monitoring` -> `Dashboards`
- Check `App Engine` dashboard
- Check `VM Instances` dashboard
- Click on `Create Dashboard`

### Create uptime checks and alerts

- Select `Uptime Checks`

| Property        | Value                   |
|-----------------|-------------------------|
| Title           | App Engine Uptime Check |
| Protocol        | HTTPS                   |
| Hostname        | .appspot.com            |
| Resource Type   | URL                     |
| Path            | /                       |
| Check Frequency | 1 minute                |

- `Test`
- Alert and Notification `Uptime Check Alert`
- Create `Notification channels` - Create one with `temp-email` [https://temp-mail.org/en/](https://temp-mail.org/en) (a CLI sketch follows at the end of this walkthrough)
- Save
- Navigate to `App Engine` and `Disable application`. Check the URL; it should not work anymore
- Return to `Monitoring` and click `Uptime checks`. It should be `Failing`
- Click `Alerting`; an incident should have fired. Check your email.
- Now enable the application in `AppEngine`. Everything should be resolved. Check your email.
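The notification channel created above can also be set up from the CLI. A hedged sketch; the email address is a placeholder:

```bash
# hypothetical email notification channel
gcloud beta monitoring channels create \
  --display-name="temp email" \
  --type=email \
  --channel-labels=email_address=you@example.com
```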
================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_009_monitoring_applications_in_gcp__python/app.yaml
================================================
# For AppEngine
runtime: python37

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_009_monitoring_applications_in_gcp__python/main.py
================================================
from flask import Flask, render_template, request
import googlecloudprofiler

app = Flask(__name__)


@app.route("/")
def main():
    model = {"title": "Hello DevOps Fans."}
    return render_template('index.html', model=model)


# This code simply turns Profiler on. Once on, Profiler starts reporting application metrics to Google Cloud
try:
    googlecloudprofiler.start(verbose=3)
except (ValueError, NotImplementedError) as exc:
    print(exc)


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=8080, debug=True, threaded=True)

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_009_monitoring_applications_in_gcp__python/requirements.txt
================================================
Flask==2.0.3
itsdangerous==2.0.1
Jinja2==3.0.3
google-cloud-profiler==3.0.6
protobuf==3.20.1

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_009_monitoring_applications_in_gcp__python/templates/index.html
================================================
{% extends "layout.html" %}
{% block content %}
<div class="jumbotron">
    <div class="container">
        <h1>{{model.title}}</h1>
    </div>
</div>
{% endblock %}

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_009_monitoring_applications_in_gcp__python/templates/layout.html
================================================
<!doctype html>
<html lang="en">
<head>
    <title>{{model.title}}</title>
</head>
<body>
{% block content %}{% endblock %}
</body>
</html>
================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_010_alerting_in_google_cloud/ReadMe.md
================================================
# Alerting in Google Cloud

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

## Objectives

- Download a sample app from GitHub.
- Deploy an application to App Engine.
- Create uptime checks and alerts.
- Create an alerting policy with the CLI.

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_010_alerting_in_google_cloud__python/Dockerfile
================================================
FROM python:3.7
WORKDIR /app
COPY . .
RUN pip install gunicorn
RUN pip install -r requirements.txt
ENV PORT=8080
CMD exec gunicorn --bind :$PORT --workers 1 --threads 8 main:app

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_010_alerting_in_google_cloud__python/ReadMe.md
================================================
# Alerting in Google Cloud - Python App - App Engine

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Run the app locally on gcp console
- Deploy to App Engine
- Examine the App Engine Logs
- Create an App Engine latency alert
- Creating an Alerting Policy with the CLI

**Skills**

- gcp
- python
- app engine
- app engine logs
- alerting
- alerting policy

### Run the app locally on gcp console

```bash
sudo pip3 install -r requirements.txt
python3 main.py
```

- Web Preview

### Deploy to App Engine

- Create file app.yaml

```bash
# create App Engine application
gcloud app create --region=us-central
# Deploy the app using
gcloud app deploy --version=one --quiet
```

- Navigate to App Engine dashboard
- Click on the URL

### Examine the App Engine Logs

- `Tools` -> `Logs`

### Create an App Engine latency alert

#### Check current application latency in Metrics explorer

- `Monitoring` > `Metrics explorer`
- `Resource & Metric` > `GAE Application` > `Http` > `Response latency` (Wait and hard refresh the page if required)
- `Aggregator` to `mean`
- Advanced options : `Aligner` to `99th percentile`

This shows the average time it took our application to return a response to the fastest 99% of requests, cutting off the 1% of anomalies.

#### Create an alert based on the same metric

- `Monitoring` > `Alerting`
- Add a new notification channel. Give a temporary email [temp-mail.org](https://temp-mail.org/en/)
- `Alerting` > `Create Policy`
- `Select a metric` > `Resource & Metric` > `GAE Application` > `Http` > `Response latency` (Wait and hard refresh the page if required)
- `Apply` > Set `rolling window` to `1 min`
- `Any time series violates` the `Condition` `is above` a Threshold of `8000` ms should trigger an alert.
- Set `condition name` to `Response latency [MEAN] for 99th% over 8s`
- Next and select the notification channel
- Name the alert `Hello too slow` > `Next` > `Create Policy`
- Run this on the gcp console

```bash
while true; do curl -s https://$DEVSHELL_PROJECT_ID.appspot.com/sleepy200 | grep -e "<title>" -e "sleep";sleep .$[( $RANDOM % 10 )]s;done
```

- Check after 5 mins
- `Monitoring` > `Alerting`
- Check temp email.
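Before the alert fires, the added latency can be spot-checked by hand with curl's timing output; the URL follows the same pattern as the loop above:

```bash
# print total response time in seconds; /sleepy200 should take roughly 10s
curl -s -o /dev/null -w '%{time_total}\n' \
  https://$DEVSHELL_PROJECT_ID.appspot.com/sleepy200
```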
- `Acknowledge Incident` and see the difference

### Creating an Alerting Policy with the CLI

- Run on gcp console

```bash
gcloud alpha monitoring policies create --policy-from-file="app-engine-error-percent-policy.json"
```

- Check the new policy created in the console
- Run

```bash
while true; do curl -s https://$DEVSHELL_PROJECT_ID.appspot.com/random500error | grep -e "<title>" -e "error";sleep .$[( $RANDOM % 10 )]s;done
```

- `Monitoring` > `Alerting`, wait another few minutes.
- Check your temp email again
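To confirm the policy landed without opening the console, a hedged check from the CLI:

```bash
# list alerting policies in the current project by display name
gcloud alpha monitoring policies list --format='value(displayName)'
```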
================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_010_alerting_in_google_cloud__python/app-engine-error-percent-policy.json
================================================
{
  "displayName": "HTTP error count exceeds 1 percent for App Engine apps",
  "combiner": "OR",
  "conditions": [
    {
      "displayName": "Ratio: HTTP 500s error-response counts / All HTTP response counts",
      "conditionThreshold": {
        "filter": "metric.label.response_code>=\"500\" AND metric.label.response_code<\"600\" AND metric.type=\"appengine.googleapis.com/http/server/response_count\" AND resource.type=\"gae_app\"",
        "aggregations": [
          {
            "alignmentPeriod": "60s",
            "crossSeriesReducer": "REDUCE_SUM",
            "groupByFields": [
              "project",
              "resource.label.module_id",
              "resource.label.version_id"
            ],
            "perSeriesAligner": "ALIGN_DELTA"
          }
        ],
        "denominatorFilter": "metric.type=\"appengine.googleapis.com/http/server/response_count\" AND resource.type=\"gae_app\"",
        "denominatorAggregations": [
          {
            "alignmentPeriod": "60s",
            "crossSeriesReducer": "REDUCE_SUM",
            "groupByFields": [
              "project",
              "resource.label.module_id",
              "resource.label.version_id"
            ],
            "perSeriesAligner": "ALIGN_DELTA"
          }
        ],
        "comparison": "COMPARISON_GT",
        "thresholdValue": 0.01,
        "duration": "0s",
        "trigger": {
          "count": 1
        }
      }
    }
  ]
}

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_010_alerting_in_google_cloud__python/app.yaml
================================================
# For AppEngine
runtime: python37

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_010_alerting_in_google_cloud__python/main.py
================================================
import time
import random
import json
from flask import Flask, render_template, request

app = Flask(__name__)


@app.route("/")
def home():
    model = {"title": "Hello DevOps Fans."}
    return render_template('index.html', model=model)


# when you want to have a 10 second delay in the response
@app.route("/sleepy200")
def sleepy200():
    model = {"title": "Hello DevOps Fans. I just woke up from sleep"}
    time.sleep(10)
    return render_template('index.html', model=model)


# The route should give a random 500 error
@app.route("/random500error")
def random500():
    num = random.randrange(20)
    if num == 0:
        return json.dumps({"error": 'Error thrown randomly'}), 500
    else:
        model = {"title": "Still 200 OK, try again :) ."}
        return render_template('index.html', model=model)


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=8080, debug=True, threaded=True)

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_010_alerting_in_google_cloud__python/requirements.txt
================================================
Flask==2.0.3

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_010_alerting_in_google_cloud__python/templates/index.html
================================================
{% extends "layout.html" %}
{% block content %}
<div class="jumbotron">
    <div class="container">
        <h1>{{model.title}}</h1>
    </div>
</div>
{% endblock %}

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_010_alerting_in_google_cloud__python/templates/layout.html
================================================
<!doctype html>
<html lang="en">
<head>
    <title>{{model.title}}</title>
</head>
<body>
{% block content %}{% endblock %}
</body>
</html>
================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_011_service_monitoring__node/ReadMe.md
================================================
# Service Monitoring

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

[haggman/HelloLoggingNodeJS.git](https://github.com/haggman/HelloLoggingNodeJS.git)

**High Level Objectives**

- Deploy the test nodejs app
- Use Service Monitoring to create an availability SLO
- Create an alert tied to your SLO
- Trigger the alert

**Skills**

- gcp
- nodejs
- app engine
- app engine logs
- alerting
- service monitoring
- service level objective
- error budget
- error reporting
- monitoring

### Deploy the test nodejs app

- Clone the repo

```bash
git clone https://github.com/haggman/HelloLoggingNodeJS.git
cd HelloLoggingNodeJS
```

- Create a new App Engine app

```bash
gcloud app create --region=us-central
```

- Deploy the Hello Logging app to App Engine

```bash
gcloud app deploy
```

- Test the URL

### Use Service Monitoring to create an availability SLO, Create an alert tied to your SLO

- Place some load on the application

```bash
# The loop generates ten requests per second.
# The URL is to the /random-error route, which generates an error about every 1000 requests,
# so you should see approximately 1 error every 100s.
while true; \
do curl -s https://$DEVSHELL_PROJECT_ID.appspot.com/random-error \
-w '\n' ;sleep .1s;done
```

- `App Engine` > `Dashboard`
- Check `Server Errors`
- Navigation menu to go to `Error Reporting`. Notice the error is also being caught here
- Navigation menu to go to `Monitoring` > `Services` > `default` > `+Create SLO`
- `Availability` to `Request based`
- `Period type` to `Rolling` and `Period Length` to `7 days`
- Set `Goal` to `99.5%`
- Create

### Create an alert tied to your SLO

- Expand the new SLO and investigate the information it displays
- Check the three tabs: `Service level indicator`, `Error budget`, and `Alerts firing`
- `Alerts firing` > `CREATE SLO ALERT`
- `Display Name` to `Really short window test`
- `Lookback duration` to `10` minutes and `burn rate threshold` to `1.5`
- Create a notification channel and select it
- Next and create

### Trigger the alert

- In the `index.js` file, scroll to the /random-error route (found at approximately line 126) and change the value next to Math.random from 1000 to 20
- Run

```bash
gcloud app deploy
while true; \
do curl -s https://$DEVSHELL_PROJECT_ID.appspot.com/random-error \
-w '\n' ;sleep .1s;done
```

- Wait for some time and notice the new alert being triggered.
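The roughly 0.1% error rate that the SLO above is measuring can be sanity-checked by counting responses by status code. A minimal sketch against the same /random-error route:

```bash
# count HTTP 200s vs errors over 200 requests (expect roughly 1 error per 1000)
ok=0; err=0
for i in $(seq 1 200); do
  code=$(curl -s -o /dev/null -w '%{http_code}' \
    https://$DEVSHELL_PROJECT_ID.appspot.com/random-error)
  if [ "$code" = "200" ]; then ok=$((ok+1)); else err=$((err+1)); fi
done
echo "ok=$ok err=$err"
```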
================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_011_service_monitoring__node_app/ReadMe.md
================================================
# Service Monitoring

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

[haggman/HelloLoggingNodeJS.git](https://github.com/haggman/HelloLoggingNodeJS.git)

## High Level Objectives

- Deploy the test nodejs app
- Use Service Monitoring to create an availability SLO
- Create an alert tied to your SLO
- Trigger the alert

### Deploy the test nodejs app

- Clone the repo

```bash
git clone https://github.com/haggman/HelloLoggingNodeJS.git
cd HelloLoggingNodeJS
```

- Create a new App Engine app

```bash
gcloud app create --region=us-central
```

- Deploy the Hello Logging app to App Engine

```bash
gcloud app deploy
```

- Test the URL

### Use Service Monitoring to create an availability SLO, Create an alert tied to your SLO

- Place some load on the application

```bash
# The loop generates ten requests per second.
# The URL is to the /random-error route, which generates an error about every 1000 requests,
# so you should see approximately 1 error every 100s.
while true; \
do curl -s https://$DEVSHELL_PROJECT_ID.appspot.com/random-error \
-w '\n' ;sleep .1s;done
```

- `App Engine` > `Dashboard`
- Check `Server Errors`
- Navigation menu to go to `Error Reporting`. Notice the error is also being caught here
- Navigation menu to go to `Monitoring` > `Services` > `default` > `+Create SLO`
- `Availability` to `Request based`
- `Period type` to `Rolling` and `Period Length` to `7 days`
- Set `Goal` to `99.5%`
- Create

### Create an alert tied to your SLO

- Expand the new SLO and investigate the information it displays
- Check the three tabs: `Service level indicator`, `Error budget`, and `Alerts firing`
- `Alerts firing` > `CREATE SLO ALERT`
- `Display Name` to `Really short window test`
- `Lookback duration` to `10` minutes and `burn rate threshold` to `1.5`
- Create a notification channel and select it
- Next and create

### Trigger the alert

- In the `index.js` file, scroll to the /random-error route (found at approximately line 126) and change the value next to Math.random from 1000 to 20
- Run

```bash
gcloud app deploy
while true; \
do curl -s https://$DEVSHELL_PROJECT_ID.appspot.com/random-error \
-w '\n' ;sleep .1s;done
```

- Wait for some time and notice the new alert being triggered.

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_012_monitoring_and_dashboarding_multiple_projects_from_a_single_workspace/ReadMe.md
================================================
# Monitoring and Dashboarding Multiple Projects from a Single Workspace

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

## High Level Objectives

- Configure Resource Projects
- Create a Monitoring Workspace and link the two worker projects into it
- Create and configure Monitoring groups
- Create and test an uptime check

We need to have 3 different projects. The first project (ID 1) will be the monitoring workspace host project. Projects ID 2 and ID 3 will be the monitored/resource projects. Per Google's recommended best practices, the project we use to host the monitoring workspace will not be one of the projects actually housing monitored resources.
### Configure Resource Projects

- Label Project ID 1 as Monitoring Project.
- Label Project ID 2 as Worker 1.
- Label Project ID 3 as Worker 2.
- Launch `NGINX Open Source Packaged by Bitnami` from `Marketplace` in the `Worker 1` and `Worker 2` projects

### Create a Monitoring Workspace and link the two worker projects into it

- Go to `Monitoring Project`
- `Monitoring` > `Overview` > `Settings`
- Add `Worker 1` and `Worker 2`
- Choose `Use this project as the scoping project`
- Save and go to `Dashboards`. Take a few minutes to explore.

### Create and configure Monitoring groups

- Go to each `Worker` Project
- Assign labels to both VMs in `Worker 1` and `Worker 2` (a CLI sketch follows at the end of this walkthrough)
  - `component:frontend`
  - `stage:dev/test`
- Create Resource Group
- `Monitoring` > `Groups` > `Create` > Name : Frontend Servers
- Give the `component` = `frontend` criteria. You should see 2 instances
- Create a `Sub Group`; keep the first criteria the same. Give the second as `stage` = `dev`
- Check the UI when done

### Create and test an uptime check

- Create an uptime check for the Frontend Servers group
- Check out how an uptime check handles failure
- What can Cloud Monitoring, Logging, and Alerting tell us?

### Create a custom dashboard

- Create a developer dashboard and add an uptime chart to it
- Add and test a CPU utilization chart to the dashboard
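The labels used by the monitoring groups above can also be attached from the CLI. A sketch; the instance name and zone are placeholders:

```bash
# label a worker VM so the "Frontend Servers" group picks it up
gcloud compute instances add-labels INSTANCE_NAME \
  --zone=ZONE \
  --labels=component=frontend,stage=dev
```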
================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_012_monitoring_and_dashboarding_multiple_projects_from_a_single_workspace__nginx/ReadMe.md
================================================
# Monitoring and Dashboarding Multiple Projects from a Single Workspace

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Configure Resource Projects
- Create a Monitoring Workspace and link the two worker projects into it
- Create and configure Monitoring groups
- Create and test an uptime check

**Skills**

- gcp
- monitoring
- dashboarding
- multiple projects
- monitoring groups
- uptime check

We need to have 3 different projects. The first project (ID 1) will be the monitoring workspace host project. Projects ID 2 and ID 3 will be the monitored/resource projects. Per Google's recommended best practices, the project we use to host the monitoring workspace will not be one of the projects actually housing monitored resources.

### Configure Resource Projects

- Label Project ID 1 as Monitoring Project.
- Label Project ID 2 as Worker 1.
- Label Project ID 3 as Worker 2.
- Launch `NGINX Open Source Packaged by Bitnami` from `Marketplace` in the `Worker 1` and `Worker 2` projects

### Create a Monitoring Workspace and link the two worker projects into it

- Go to `Monitoring Project`
- `Monitoring` > `Overview` > `Settings`
- Add `Worker 1` and `Worker 2`
- Choose `Use this project as the scoping project`
- Save and go to `Dashboards`. Take a few minutes to explore.

### Create and configure Monitoring groups

- Go to each `Worker` Project
- Assign labels to both VMs in `Worker 1` and `Worker 2`
  - `component:frontend`
  - `stage:dev/test`
- Create Resource Group
- `Monitoring` > `Groups` > `Create` > Name : Frontend Servers
- Give the `component` = `frontend` criteria. You should see 2 instances
- Create a `Sub Group`; keep the first criteria the same. Give the second as `stage` = `dev`
- Check the UI when done

### Create and test an uptime check

- Create an uptime check for the Frontend Servers group
- Check out how an uptime check handles failure
- What can Cloud Monitoring, Logging, and Alerting tell us?

### Create a custom dashboard

- Create a developer dashboard and add an uptime chart to it
- Add and test a CPU utilization chart to the dashboard

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_013_compute_logging_and_monitoring/ReadMe.md
================================================
# Compute Logging And Monitoring

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Set up a VM and a GKE cluster.
- Install and use the logging and monitoring agents for Compute Engine.
- Add a service to the GKE cluster and examine its logs and metrics.

**Skills**

- gcp
- vm
- gke
- logging agent
- monitoring agent
- logs and metrics

### Set up a VM and a GKE cluster

Create VM

- Name : `web-server-vm`
- Boot Disk : `Debian GNU/Linux 10 (buster)`
- `Allow HTTP traffic`
- SSH

```bash
sudo apt-get update
sudo apt-get install nginx
sudo nginx -v
URL=URL_to_your_server
while true; do curl -s $URL | grep -oP "<title>.*</title>"; \
sleep .1s;done
# Check to make sure you have the requisite scopes to perform logging and monitoring.
# We need logging.write and monitoring.write
curl --silent --connect-timeout 1 -f -H "Metadata-Flavor: Google" \
http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/scopes
# Download the script, add the monitoring agent repo to apt, and install the agent.
curl -sSO https://dl.google.com/cloudagents/add-monitoring-agent-repo.sh
sudo bash add-monitoring-agent-repo.sh --also-install
# Start the monitoring agent:
sudo service stackdriver-agent start
# Install the logging agent:
curl -sSO https://dl.google.com/cloudagents/add-logging-agent-repo.sh
sudo bash add-logging-agent-repo.sh --also-install
# Check status of both
sudo service google-fluentd status
sudo service stackdriver-agent status
# To fully integrate the server, you enable the status information handler in
# Nginx by adding a configuration file to the Nginx configuration directory:
(cd /etc/nginx/conf.d/ && sudo curl -O https://raw.githubusercontent.com/Stackdriver/stackdriver-agent-service-configs/master/etc/nginx/conf.d/status.conf)
# Reload nginx service
sudo service nginx reload
# Enable the Nginx monitoring plugin:
(cd /opt/stackdriver/collectd/etc/collectd.d/ && sudo curl -O https://raw.githubusercontent.com/Stackdriver/stackdriver-agent-service-configs/master/etc/collectd.d/nginx.conf)
# Restart the monitoring agent
sudo service stackdriver-agent restart
```

Create GKE Cluster

- Name : `gke-cluster`
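Connecting kubectl to the new cluster is done with get-credentials. A sketch; the zone is an assumption:

```bash
# fetch kubeconfig credentials for the new cluster, then verify
gcloud container clusters get-credentials gke-cluster --zone=us-central1-c
kubectl get nodes
```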
### Use the logging and monitoring agents for Compute Engine

- Monitoring > Metrics Explorer
- Resource & Metric : VM Instance > Instance > CPU utilization
- Filter instance_name = web-server-vm : Apply
- Resource & Metric : VM Instance > nginx > Requests : Apply

### Add a service to the GKE cluster and examine its logs and metrics

- Cloud Shell

```bash
# Enable the Cloud Build API as it is needed in a few steps
gcloud services enable cloudbuild.googleapis.com
git clone https://github.com/haggman/HelloLoggingNodeJS.git
# Take a few mins to check the code
# Submit the Dockerfile to Google's Cloud Build to generate a container and store it in your Container Registry:
gcloud builds submit --tag gcr.io/$DEVSHELL_PROJECT_ID/hello-logging-js .
# Edit k8sapp.yaml and replace the $GCLOUD_PROJECT with the actual ID
# Connect to the cluster and
kubectl apply -f k8sapp.yaml
kubectl get services
URL=url_to_k8s_app
while true; do curl -s $URL -w "\n"; sleep .1s;done
```

- Monitoring > Dashboards > GKE > VIEW ALL, enable Sparklines, and click Apply
- Switch to the Workloads tab. This is focused on the deployed workloads, grouped by namespace
- Finally, scroll to the Kubernetes Services tab and expand hello-logging-service

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_014_log_analysis/ReadMe.md
================================================
# Log Analysis

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

## High Level Objectives

- Set up and deploy a test application.
- Explore the log entries generated by the test application.
- Create and use a logs-based metric.
- Export application logs to BigQuery.

### Set up and deploy a test application.

- Enable APIs

```bash
gcloud services enable cloudbuild.googleapis.com \
run.googleapis.com \
compute.googleapis.com \
cloudprofiler.googleapis.com
```

- Clone

```bash
git clone https://github.com/haggman/HelloLoggingNodeJS.git
```

- Change the error rate

```node
//Generates an uncaught exception every 20 requests
app.get('/random-error', (req, res) => {
  error_rate = parseInt(req.query.error_rate) || 20
  let errorNum = (Math.floor(Math.random() * error_rate) + 1);
  if (errorNum==1) {
    console.log("Called /random-error, and it's about to error");
    doesNotExist();
  }
  console.log("Called /random-error, and it worked");
  res.send("Worked this time.");
});
```

- Build and deploy

```bash
sh rebuildService.sh
# Set URL ENV
URL=$(gcloud run services list --platform managed --format="value(URL)" | grep hello-logging)
echo $URL
# Add some traffic
while true; \
do curl -s $URL/random-error \
-w '\n' ;sleep .1s;done
```

### Explore the log files for a test application

- Cloud Run Revision > hello-logging

### Create and use a logs-based metric

- Modify the while loop to hit a different endpoint

```bash
while true; \
do curl -s $URL/score \
-w '\n' ;sleep .1s;done
```

- Explore the logs generated
- Cloud Run Revision > hello-logging
- Change the `/score` route with the code below.
- Notice how the message contents are now properties of the output object, and how the printed message is the JSON object stringified.

```node
//Basic NodeJS app built with the express server
app.get('/score', (req, res) => {
  //Random score, the containerID is a UUID unique to each
  //runtime container (testing was done in Cloud Run).
  //funFactor is a random number 1-100
  let score = Math.floor(Math.random() * 100) + 1;
  let output = {
    message: '/score called',
    score: score,
    containerID: containerID,
    funFactor: funFactor
  };
  console.log(JSON.stringify(output));
  //Basic message back to browser
  res.send(`Your score is a ${score}. Happy?`);
});
```

- Rebuild and deploy

```bash
sh rebuildService.sh
```

- Restart the loop

```bash
while true; \
do curl -s $URL/score \
-w '\n' ;sleep .1s;done
```

- Explore the logs again and examine the new format.
- Create a score logs-based metric using field `jsonPayload.score` (a CLI sketch follows)
- Create a Dashboard using this metric
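The metric can be approximated from the CLI as a counter on the /score entries (the lab builds a distribution metric in the console; the name and log filter below are assumptions):

```bash
# hypothetical counter metric over /score log entries from the Cloud Run service
gcloud logging metrics create score_calls \
  --description="Counts /score log entries" \
  --log-filter='resource.type="cloud_run_revision" AND jsonPayload.score>0'
```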
### Export application logs to BigQuery

- Modify the while loop again

```bash
while true; \
do curl -s $URL/random-error \
-w '\n' ;sleep .1s;done
```

- Explore the logs again and validate the change
- Create and execute a query to pull just the textPayloads that start with ReferenceError

```sql
SELECT textPayload
FROM `[project-id].hello_logging_logs.run_googleapis_com_stderr_[date]`
WHERE textPayload LIKE 'ReferenceError%'
```

- To do a count, modify the query to count these entries:

```sql
SELECT count(textPayload)
FROM `[project-id].hello_logging_logs.run_googleapis_com_stderr_[date]`
WHERE textPayload LIKE 'ReferenceError%'
```

- To check the error percentage, build a query that compares the total requests to the ReferenceError% requests.

```sql
SELECT errors / total_requests
FROM (
  SELECT
    (SELECT COUNT(*)
     FROM `[project-id].hello_logging_logs.run_googleapis_com_requests_[date]`) AS total_requests,
    (SELECT COUNT(textPayload)
     FROM `[project-id].hello_logging_logs.run_googleapis_com_stderr_[date]`
     WHERE textPayload LIKE 'ReferenceError%') AS errors)
```

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_014_log_analysis_cloud_run__node/ReadMe.md
================================================
# Log Analysis

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Set up and deploy a test node application to cloud run.
- Explore the log entries generated by the test application.
- Create and use a logs-based metric.
- Export application logs to BigQuery.

**Skills**

- gcp
- cloud run
- logging
- metrics
- bigquery
- nodejs
- logs-based metrics

### Set up and deploy a test application.

- Enable APIs

```bash
gcloud services enable cloudbuild.googleapis.com \
run.googleapis.com \
compute.googleapis.com \
cloudprofiler.googleapis.com
```

- Clone

```bash
git clone https://github.com/haggman/HelloLoggingNodeJS.git
```

- Change the error rate

```node
//Generates an uncaught exception every 20 requests
app.get('/random-error', (req, res) => {
  error_rate = parseInt(req.query.error_rate) || 20
  let errorNum = (Math.floor(Math.random() * error_rate) + 1);
  if (errorNum==1) {
    console.log("Called /random-error, and it's about to error");
    doesNotExist();
  }
  console.log("Called /random-error, and it worked");
  res.send("Worked this time.");
});
```

- Build and deploy

```bash
sh rebuildService.sh
# Set URL ENV
URL=$(gcloud run services list --platform managed --format="value(URL)" | grep hello-logging)
echo $URL
# Add some traffic
while true; \
do curl -s $URL/random-error \
-w '\n' ;sleep .1s;done
```

### Explore the log files for a test application

- Cloud Run Revision > hello-logging

### Create and use a logs-based metric

- Modify the while loop to hit a different endpoint

```bash
while true; \
do curl -s $URL/score \
-w '\n' ;sleep .1s;done
```

- Explore the logs generated
- Cloud Run Revision > hello-logging
- Change the `/score` route with the code below.
- Notice how the message contents are now properties of the output object, and how the printed message is the JSON object stringified.

```node
//Basic NodeJS app built with the express server
app.get('/score', (req, res) => {
  //Random score, the containerID is a UUID unique to each
  //runtime container (testing was done in Cloud Run).
  //funFactor is a random number 1-100
  let score = Math.floor(Math.random() * 100) + 1;
  let output = {
    message: '/score called',
    score: score,
    containerID: containerID,
    funFactor: funFactor
  };
  console.log(JSON.stringify(output));
  //Basic message back to browser
  res.send(`Your score is a ${score}. Happy?`);
});
```

- Rebuild and deploy

```bash
sh rebuildService.sh
```

- Restart the loop

```bash
while true; \
do curl -s $URL/score \
-w '\n' ;sleep .1s;done
```

- Explore the logs again and examine the new format.
- Create a score logs-based metric using field `jsonPayload.score`
- Create a Dashboard using this metric

### Export application logs to BigQuery

- Modify the while loop again

```bash
while true; \
do curl -s $URL/random-error \
-w '\n' ;sleep .1s;done
```

- Explore the logs again and validate the change
- Create and execute a query to pull just the textPayloads that start with ReferenceError

```sql
SELECT textPayload
FROM `[project-id].hello_logging_logs.run_googleapis_com_stderr_[date]`
WHERE textPayload LIKE 'ReferenceError%'
```

- To do a count, modify the query to count these entries:

```sql
SELECT count(textPayload)
FROM `[project-id].hello_logging_logs.run_googleapis_com_stderr_[date]`
WHERE textPayload LIKE 'ReferenceError%'
```

- To check the error percentage, build a query that compares the total requests to the ReferenceError% requests.

```sql
SELECT errors / total_requests
FROM (
  SELECT
    (SELECT COUNT(*)
     FROM `[project-id].hello_logging_logs.run_googleapis_com_requests_[date]`) AS total_requests,
    (SELECT COUNT(textPayload)
     FROM `[project-id].hello_logging_logs.run_googleapis_com_stderr_[date]`
     WHERE textPayload LIKE 'ReferenceError%') AS errors)
```

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_015_cloud_audit_logs/ReadMe.md
================================================
# Cloud Audit Logs

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

## High Level Objectives

- Enable data access logs on Cloud Storage.
- Generate admin and data access activity.
- View Audit logs.

### Enable data access logs on Cloud Storage

- Navigation Menu > IAM & Admin > Audit Logs.
- Scroll or use Filter to locate `Google Cloud Storage`, then check the box next to it. This should display the Info Panel with options on LOG TYPE.
- Select Admin Read, Data Read and Data Write, and then click SAVE.

### Generate some admin and data access activity

```bash
# Use gsutil to create a Cloud Storage bucket with the same name as your project:
gsutil mb gs://$DEVSHELL_PROJECT_ID
# Make sure the bucket was successfully created:
gsutil ls
# Create a simple "Hello World" type of text file and upload it to your bucket:
echo "Hello World!" > sample.txt
gsutil cp sample.txt gs://$DEVSHELL_PROJECT_ID
# Verify the file is in the bucket:
gsutil ls gs://$DEVSHELL_PROJECT_ID
# Create a new auto mode network named mynetwork, then create a new virtual machine and place it on the new network:
gcloud compute networks create mynetwork --subnet-mode=auto
gcloud compute instances create default-us-vm \
--zone=us-west4-b --network=mynetwork
# Delete the storage bucket:
gsutil rm -r gs://$DEVSHELL_PROJECT_ID
```

### Viewing audit logs

- Use the Navigation menu to go to Cloud overview > Activity.
- In the Filters pane, click Activity types, select all, and click OK
- Click Resource type > Select GCE Network > OK
- Use the Navigation menu to go to Logging > Logs Explorer.
- Click the Log name dropdown and use the filter to locate the activity log under the CLOUD AUDIT section, and Apply it to the query.
- Use the Log fields explorer to filter to GCS Bucket entries.
- Expand the delete entry, then drill into the protoPayload > authenticationInfo field and notice you can see the email address of the user that performed this action.

```bash
gcloud logging read \
"logName=projects/$DEVSHELL_PROJECT_ID/logs/cloudaudit.googleapis.com%2Fdata_access"
```
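The admin activity log can be read the same way as the data access log above; a sketch:

```bash
# pull the five most recent admin activity audit entries
gcloud logging read \
  "logName=projects/$DEVSHELL_PROJECT_ID/logs/cloudaudit.googleapis.com%2Factivity" \
  --limit=5 --format=json
```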
================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_015_cloud_storage_audit_logs/ReadMe.md
================================================
# Cloud Audit Logs

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Enable data access logs on Cloud Storage.
- Generate admin and data access activity.
- View Audit logs.

**Skills**

- gcp
- gcp-logging
- access-logs
- audit-logs
- cloud-storage

### Enable data access logs on Cloud Storage

- Navigation Menu > IAM & Admin > Audit Logs.
- Scroll or use Filter to locate `Google Cloud Storage`, then check the box next to it. This should display the Info Panel with options on LOG TYPE.
- Select Admin Read, Data Read and Data Write, and then click SAVE.

### Generate some admin and data access activity

```bash
# Use gsutil to create a Cloud Storage bucket with the same name as your project:
gsutil mb gs://$DEVSHELL_PROJECT_ID
# Make sure the bucket was successfully created:
gsutil ls
# Create a simple "Hello World" type of text file and upload it to your bucket:
echo "Hello World!" > sample.txt
gsutil cp sample.txt gs://$DEVSHELL_PROJECT_ID
# Verify the file is in the bucket:
gsutil ls gs://$DEVSHELL_PROJECT_ID
# Create a new auto mode network named mynetwork, then create a new virtual machine and place it on the new network:
gcloud compute networks create mynetwork --subnet-mode=auto
gcloud compute instances create default-us-vm \
--zone=us-west4-b --network=mynetwork
# Delete the storage bucket:
gsutil rm -r gs://$DEVSHELL_PROJECT_ID
```

### Viewing audit logs

- Use the Navigation menu to go to Cloud overview > Activity.
- In the Filters pane, click Activity types, select all, and click OK
- Click Resource type > Select GCE Network > OK
- Use the Navigation menu to go to Logging > Logs Explorer.
- Click the Log name dropdown and use the filter to locate the activity log under the CLOUD AUDIT section, and Apply it to the query.
- Use the Log fields explorer to filter to GCS Bucket entries.
- Expand the delete entry, then drill into the protoPayload > authenticationInfo field and notice you can see the email address of the user that performed this action.

```bash
gcloud logging read \
"logName=projects/$DEVSHELL_PROJECT_ID/logs/cloudaudit.googleapis.com%2Fdata_access"
```

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_016_analyzing_network_traffic_with_vpc_flow_logs/ReadMe.md
================================================
# Analyzing Network Traffic With VPC Flow Logs

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Configure a custom network with VPC flow logs
- Create an Apache web server
- Verify that network traffic is logged
- Export the network traffic to BigQuery to further analyze the logs
- Add VPC flow log aggregation

**Skills**

- gcp
- gcp-networking
- gcp-vpc
- gcp-vpc-flow-logs
- gcp-bigquery
- vpc flow logs aggregation
- export to bigquery

### Configure a custom network with VPC flow logs

- In the Cloud Console, in the Navigation menu (Navigation menu icon), select VPC network > VPC networks.

| Property | Value   |
|----------|---------|
| Name     | vpc-net |

- Subnet creation mode, click Custom

| Property         | Value       |
|------------------|-------------|
| Name             | vpc-subnet  |
| Region           | us-central1 |
| IP address range | 10.1.3.0/24 |
| Flow Logs        | On          |

- Create Firewall Rule

| Property            | Value                                                            |
|---------------------|------------------------------------------------------------------|
| Name                | allow-http-ssh                                                   |
| Network             | vpc-net                                                          |
| Targets             | Specified target tags                                            |
| Target tags         | http-server                                                      |
| Source filter       | IPv4 Ranges                                                      |
| Source IPv4 ranges  | 0.0.0.0/0                                                        |
| Protocols and ports | Specified protocols and ports, and then check tcp, type: 80, 22  |

### Create an Apache web server

- In the Navigation menu, select Compute Engine > VM instances.

| Property     | Value         |
|--------------|---------------|
| Name         | web-server    |
| Region       | us-central1   |
| Zone         | us-central1-c |
| Series       | N1            |
| Machine type | f1-micro      |

- Click Networking, Disks, Security, Management, Sole-tenancy.
- For Network tags, type http-server.
- Specify the following and leave the remaining settings as their defaults:

| Property   | Value                    |
|------------|--------------------------|
| Network    | vpc-net                  |
| Subnetwork | vpc-subnet (10.1.3.0/24) |

- Install Apache

```bash
# In the web-server SSH terminal, update the package index:
sudo apt-get update
# Install the apache2 package:
sudo apt-get install apache2 -y
# To create a new default web page by overwriting the default, run the following:
echo '
<h1>Hello World!</h1>
' | sudo tee /var/www/html/index.html
```

### Verify that network traffic is logged

- Find your IP address
- In the Cloud Console, go to Navigation menu > Logging > Logs Explorer.

### Export the network traffic to BigQuery to further analyze the logs

- Create an export sink
- Generate log traffic for BigQuery
- Note the External IP address for the web-server instance. It will be referred to as EXTERNAL_IP.

```bash
# Store the EXTERNAL_IP in an environment variable in Cloud Shell:
export MY_SERVER=
# Access the web-server 50 times from Cloud Shell:
for ((i=1;i<=50;i++)); do curl $MY_SERVER; done
```

- Visualize the VPC flow logs in BigQuery
- Add the following to the BigQuery Editor and replace your_table_id with TABLE_ID while retaining the accents (`) on both sides:

```sql
#standardSQL
SELECT
  jsonPayload.src_vpc.vpc_name,
  SUM(CAST(jsonPayload.bytes_sent AS INT64)) AS bytes,
  jsonPayload.src_vpc.subnetwork_name,
  jsonPayload.connection.src_ip,
  jsonPayload.connection.src_port,
  jsonPayload.connection.dest_ip,
  jsonPayload.connection.dest_port,
  jsonPayload.connection.protocol
FROM `your_table_id`
GROUP BY
  jsonPayload.src_vpc.vpc_name,
  jsonPayload.src_vpc.subnetwork_name,
  jsonPayload.connection.src_ip,
  jsonPayload.connection.src_port,
  jsonPayload.connection.dest_ip,
  jsonPayload.connection.dest_port,
  jsonPayload.connection.protocol
ORDER BY bytes DESC
LIMIT 15
```

- Analyze the VPC flow logs in BigQuery
- Create a new query in the BigQuery Editor with the following and replace your_table_id with TABLE_ID while retaining the accents (`) on both sides:

```sql
#standardSQL
SELECT
  jsonPayload.connection.src_ip,
  jsonPayload.connection.dest_ip,
  SUM(CAST(jsonPayload.bytes_sent AS INT64)) AS bytes,
  jsonPayload.connection.dest_port,
  jsonPayload.connection.protocol
FROM `your_table_id`
WHERE jsonPayload.reporter = 'DEST'
GROUP BY
  jsonPayload.connection.src_ip,
  jsonPayload.connection.dest_ip,
  jsonPayload.connection.dest_port,
  jsonPayload.connection.protocol
ORDER BY bytes DESC
LIMIT 15
```

### Add VPC flow log aggregation

- In the Console, navigate to the Navigation menu (Navigation menu icon) and select VPC network > VPC networks.
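Aggregation and sampling can also be adjusted from the CLI instead of the console. A hedged sketch against the subnet created earlier; the interval and sampling values are illustrative:

```bash
# coarser aggregation and 50% sampling on the vpc-subnet flow logs
gcloud compute networks subnets update vpc-subnet \
  --region=us-central1 \
  --logging-aggregation-interval=interval-30-sec \
  --logging-flow-sampling=0.5 \
  --logging-metadata=include-all
```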
================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_017_application_performance_management/ReadMe.md
================================================
# Application Performance Management

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

## High Level Objectives

- Download a pair of sample apps from GitHub
- Deploy the converter application to App Engine
- Debug the application
- Adding log data
- Fix the bug and deploy a new version
- Examine an error report coming out of Cloud Run in Error Reporting
- Examine a default and custom trace span

### Download a pair of sample apps from GitHub

- Enable APIs

```bash
gcloud services enable cloudbuild.googleapis.com
gcloud services enable containerregistry.googleapis.com
gcloud services enable run.googleapis.com
## Clone
cd ~/
git clone https://github.com/haggman/HelloLoggingNodeJS.git
# Change into the HelloLoggingNodeJS folder and use the rebuildService.sh script to deploy the application into Cloud Run:
cd ~/HelloLoggingNodeJS
sh rebuildService.sh
# New terminal session
git clone https://github.com/haggman/gcp-debugging
cd ~/gcp-debugging
# Install req and run
sudo pip3 install -r requirements.txt
python3 main.py
```

- Web Preview (Web preview button) in the Cloud Shell toolbar, and then select Preview on port 8080.

### Deploy the converter application to App Engine

- App Engine needs an application created before it can be used. This is done just once using the gcloud app create command and specifying the region where you want the app to be created. This command takes a minute or so. Please wait for it to complete before moving on:

```bash
gcloud app create --region=us-central
```

- Deploy the Flask application into App Engine. This command takes a minute or three to complete. Please wait for it before moving on:

```bash
gcloud app deploy --version=one --quiet
```

- Visit the App Engine URL

### Debug the application

- Use the Navigation menu to navigate to Debugger.
- Authorize
- CREATE SNAPSHOT

### Adding log data

- Logpoint

### Fix the bug and deploy a new version

- main.py
- Replace the if-else block on lines 24 through 29 with the following try-except block. This is Python, so make sure you get the spacing correct:

```python
try:
    fahrenheit = float(input)
    celsius = int((fahrenheit - 32.0) * 5.0 / 9.0)
except ValueError:
    fahrenheit = 'Enter a number'
    celsius = 'Invalid Input'
```

- Deploy

```bash
cd ~/gcp-debugging
gcloud app deploy --version=two --quiet
```

### Examine an error report coming out of Cloud Run in Error Reporting

```bash
cd ~/HelloLoggingNodeJS
edit index.js
```

- Hit `/uncaught`
- Use the Navigation menu to view Error Reporting.
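A quick way to generate a few of these errors, assuming `$URL` holds the Cloud Run service URL (as captured in the log-analysis task earlier):

```bash
# hit the crashing route a handful of times so Error Reporting has data
for i in $(seq 1 5); do curl -s $URL/uncaught -w '\n'; done
```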
```bash
# Create a new Google Cloud Source Repository git repo named hello-world:
cd ~/HelloLoggingNodeJS
gcloud source repos create hello-world
```

- Push a copy of the code into the project Git repository:

```bash
git push --mirror \
https://source.developers.google.com/p/$GOOGLE_CLOUD_PROJECT/r/hello-world
```

### Examine a default and custom trace span

- Use the Navigation menu to select Trace.
- Scroll down to the /slow route. Edit or replace the method so it resembles the following:

```node
//Generates a slow request
app.get('/slow', (req, res) => {
  const span1 = tracer.createChildSpan({name: 'slowPi'});
  let pi1=slowPi();
  span1.endSpan();
  const span2 = tracer.createChildSpan({name: 'slowPi2'});
  let pi2=slowPi2();
  span2.endSpan();
  res.send(`Took it's time. pi to 1,000 places: ${pi1}, pi to 100,000 places: ${pi2}`);
});
```

- Rebuild and deploy

```bash
sh rebuildService.sh
```

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_017_application_performance_management__python_and_nodejs/ReadMe.md
================================================
# Application Performance Management

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - DevOps Engineer, SRE Learning Path](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Download a pair of sample apps from GitHub
- Deploy the converter application to App Engine
- Debug the application
- Adding log data
- Fix the bug and deploy a new version
- Examine an error report coming out of Cloud Run in Error Reporting
- Examine a default and custom trace span

**Skills**

- gcp
- gcp-cloud-run
- gcp-app-engine
- debug apps
- python
- nodejs
- custom traces
- trace span

### Download a pair of sample apps from GitHub

- Enable APIs

```bash
gcloud services enable cloudbuild.googleapis.com
gcloud services enable containerregistry.googleapis.com
gcloud services enable run.googleapis.com
## Clone
cd ~/
git clone https://github.com/haggman/HelloLoggingNodeJS.git
# Change into the HelloLoggingNodeJS folder and use the rebuildService.sh script to deploy the application into Cloud Run:
cd ~/HelloLoggingNodeJS
sh rebuildService.sh
# New terminal session
git clone https://github.com/haggman/gcp-debugging
cd ~/gcp-debugging
# Install req and run
sudo pip3 install -r requirements.txt
python3 main.py
```

- Web Preview (Web preview button) in the Cloud Shell toolbar, and then select Preview on port 8080.

### Deploy the converter application to App Engine

- App Engine needs an application created before it can be used. This is done just once using the gcloud app create command and specifying the region where you want the app to be created. This command takes a minute or so. Please wait for it to complete before moving on:

```bash
gcloud app create --region=us-central
```

- Deploy the Flask application into App Engine. This command takes a minute or three to complete. Please wait for it before moving on:

```bash
gcloud app deploy --version=one --quiet
```

- Visit the App Engine URL

### Debug the application

- Use the Navigation menu to navigate to Debugger.
- Authorize
- CREATE SNAPSHOT

### Adding log data

- Logpoint

### Fix the bug and deploy a new version

- main.py
- Replace the if-else block on lines 24 through 29 with the following try-except block. This is Python, so make sure you get the spacing correct:

```python
try:
    fahrenheit = float(input)
    celsius = int((fahrenheit - 32.0) * 5.0 / 9.0)
except ValueError:
    fahrenheit = 'Enter a number'
    celsius = 'Invalid Input'
```

- Deploy

```bash
cd ~/gcp-debugging
gcloud app deploy --version=two --quiet
```

### Examine an error report coming out of Cloud Run in Error Reporting

```bash
cd ~/HelloLoggingNodeJS
edit index.js
```

- Hit `/uncaught`
- Use the Navigation menu to view Error Reporting.

```bash
# Create a new Google Cloud Source Repository git repo named hello-world:
cd ~/HelloLoggingNodeJS
gcloud source repos create hello-world
```

- Push a copy of the code into the project Git repository:

```bash
git push --mirror \
https://source.developers.google.com/p/$GOOGLE_CLOUD_PROJECT/r/hello-world
```

### Examine a default and custom trace span

- Use the Navigation menu to select Trace.
- Scroll down to the /slow route.
================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_018_2inst_2buckets_2iam/ReadMe.md ================================================

# Cloud Shell, VMs, Buckets, Service Accounts

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - Getting Started with Google Kubernetes Engine Course](https://www.cloudskillsboost.google)

**High Level Objectives**

- Learn how to access the Cloud Console and Cloud Shell
- Become familiar with the Cloud Console
- Become familiar with Cloud Shell features, including the Cloud Shell code editor
- Use the Cloud Console and Cloud Shell to create buckets and VMs and service accounts
- Perform other commands in Cloud Shell

**Skills**

- gcp
- gcp-console
- gcp-storage
- gcp-vm
- gcp-iam
- gcp-shell
- gcp-cli
- gcp-service-accounts

### Use the Cloud Console and Cloud Shell to create buckets and VMs and service accounts

- Create a bucket with the same name as your project ID. (Uncheck Enforce public access prevention on this bucket and, under Choose how to control access to objects, select Fine-grained.)
- Create a VM `first-vm`. Allow HTTP traffic
- Create an IAM service account `test-service-account`
- On the Grant this service account access to project page, specify the role as `Basic > Editor`.
- Manage keys
- Download JSON key

### Explore Cloud Shell

- Use Cloud Shell to set up the environment variables for this task

```bash
MY_BUCKET_NAME_1=[BUCKET_NAME]
MY_BUCKET_NAME_2=[BUCKET_NAME_2]
MY_REGION=us-central1
```

- Move the credentials file you created earlier into Cloud Shell (upload it as credentials.json; it is used later in this task)
- Create a second Cloud Storage bucket and verify it in the Cloud Console

```bash
gsutil mb gs://$MY_BUCKET_NAME_2
```

- Use the gcloud command line to create a second virtual machine. Select a zone from the first column of the list.

```bash
gcloud compute zones list | grep $MY_REGION

# Replace [ZONE] with your selected zone:
MY_ZONE=[ZONE]

# Set this zone to be your default zone by executing the following command:
gcloud config set compute/zone $MY_ZONE

# Execute the following command to store a name in an environment variable you will use to create a VM. You will call your second VM second-vm:
MY_VMNAME=second-vm

# Create a VM in the default zone that you set earlier in this task using the new environment variable to assign the VM name:
gcloud compute instances create $MY_VMNAME \
--machine-type "e2-standard-2" \
--image-project "debian-cloud" \
--image-family "debian-11" \
--subnet "default"

# List the virtual machine instances in your project:
gcloud compute instances list

# Look at the External IP column. Notice that the external IP address of the first VM you created is shown as a link. The Google Cloud Console offers the link because you configured this VM's firewall to allow HTTP traffic.
```
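Unlike first-vm, second-vm was created without the Allow HTTP traffic option, which is why only first-vm's external IP renders as a link. As a sketch (not a lab step), the CLI equivalent of that checkbox is to tag the instance and add a matching firewall rule; the rule name here is hypothetical:

```bash
# Tag the VM, then open tcp:80 to instances carrying that tag (sketch, not a lab step)
gcloud compute instances add-tags $MY_VMNAME --tags http-server --zone $MY_ZONE
gcloud compute firewall-rules create allow-http \
  --network default --allow tcp:80 --target-tags http-server
```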
- Use the gcloud command line to create a second service account
- In Cloud Shell, execute the following command to create a new service account:

```bash
gcloud iam service-accounts create test-service-account2 --display-name "test-service-account2"

# In Cloud Shell, execute the following command to grant the second service account the Project viewer role:
gcloud projects add-iam-policy-binding $GOOGLE_CLOUD_PROJECT --member serviceAccount:test-service-account2@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com --role roles/viewer
```

### Work with Cloud Storage in Cloud Shell

```bash
# Copy a picture of a cat from a Google-provided Cloud Storage bucket to your Cloud Shell:
gsutil cp gs://cloud-training/ak8s/cat.jpg cat.jpg

# Copy the file into the first bucket that you created earlier:
gsutil cp cat.jpg gs://$MY_BUCKET_NAME_1

# Copy the file from the first bucket into the second bucket:
gsutil cp gs://$MY_BUCKET_NAME_1/cat.jpg gs://$MY_BUCKET_NAME_2/cat.jpg
```

- In the Cloud Console, on the Navigation menu (Navigation menu icon), click Cloud Storage > Browser, select both the buckets that you created, and verify that both contain the cat.jpg file
- Set the access control list for a Cloud Storage object
- Execute the following command in Cloud Shell:

```bash
gsutil acl get gs://$MY_BUCKET_NAME_1/cat.jpg > acl.txt
cat acl.txt

# To change the object to have private access, execute the following command:
gsutil acl set private gs://$MY_BUCKET_NAME_1/cat.jpg

# To verify the new ACL that's been assigned to cat.jpg, execute the following two commands:
gsutil acl get gs://$MY_BUCKET_NAME_1/cat.jpg > acl-2.txt
cat acl-2.txt
```

- In Cloud Shell, execute the following command to view the current configuration:

```bash
gcloud config list

# In Cloud Shell, execute the following command to change the authenticated user to the first service account (which you created in an earlier task) through the credentials that you downloaded to your local machine and then uploaded into Cloud Shell (credentials.json):
gcloud auth activate-service-account --key-file credentials.json
gcloud config list

# To verify the list of authorized accounts in Cloud Shell, execute the following command:
gcloud auth list

# Verify that the current account (test-service-account) cannot access the cat.jpg file in the first bucket that you created; this fails because you restricted access to this file to the owner earlier in this task:
gsutil cp gs://$MY_BUCKET_NAME_1/cat.jpg ./cat-copy.jpg

# Verify that the current account (test-service-account) can access the cat.jpg file in the second bucket that you created:
gsutil cp gs://$MY_BUCKET_NAME_2/cat.jpg ./cat-copy.jpg

# To switch to the lab account, execute the following command, replacing [USERNAME] with the username provided by Qwiklabs:
gcloud config set account [USERNAME]

# To verify that you can access the cat.jpg file in the [BUCKET_NAME] bucket (the first bucket that you created), execute the following command:
gsutil cp gs://$MY_BUCKET_NAME_1/cat.jpg ./copy2-of-cat.jpg

# Make the first Cloud Storage bucket readable by everyone, including unauthenticated users:
gsutil iam ch allUsers:objectViewer gs://$MY_BUCKET_NAME_1

# Get the public URL of the object
```
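The last comment above stops short of the URL itself. Public Cloud Storage objects are served from a well-known path, so, assuming the allUsers binding above, the URL can be printed with:

```bash
# Public objects are available at https://storage.googleapis.com/BUCKET_NAME/OBJECT_NAME
echo https://storage.googleapis.com/$MY_BUCKET_NAME_1/cat.jpg
```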
### Explore the Cloud Shell code editor

- On the Cloud console tab, click Open Terminal and in Cloud Shell, execute the following command to clone a git repository:

```bash
git clone https://github.com/googlecodelabs/orchestrate-with-kubernetes.git
mkdir test
```

- Add the following text as the last line of the cleanup.sh file:

```bash
echo Finished cleanup!
```

- In Cloud Shell, change into the orchestrate-with-kubernetes directory and display the contents of cleanup.sh.
- Create a new file.
- Save the file in the orchestrate-with-kubernetes folder and name the file index.html
- Replace the string REPLACE_WITH_CAT_URL with the URL of the cat image from an earlier task:

```html
<html>
<head>
<title>Cat</title>
</head>
<body>
<h1>Cat</h1>
<img src="REPLACE_WITH_CAT_URL" />
</body>
</html>
```
- On first-vm, click the SSH button.

```bash
sudo apt-get remove -y --purge man-db
sudo touch /var/lib/man-db/auto-update
sudo apt-get update
sudo apt-get install nginx
```

- In your Cloud Shell window, copy the HTML file you created using the Code Editor to your virtual machine (use the zone where you created first-vm):

```bash
gcloud compute scp index.html first-vm:index.nginx-debian.html --zone=us-central1-c
```

- In the SSH login window for your VM, copy the HTML file from your home directory to the document root of the nginx Web server:

```bash
sudo cp index.nginx-debian.html /var/www/html
```

- Click the link in the External IP column for your first-vm. A new browser tab opens, containing a Web page that contains the cat image.

================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_019_working_with_cloud_build/ReadMe.md ================================================

# Working with Cloud Build

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - Getting Started with Google Kubernetes Engine Course](https://www.cloudskillsboost.google)

**High Level Objectives**

- Confirm that needed APIs are enabled
- Building containers with a Dockerfile and Cloud Build
- Building containers with a build configuration file and Cloud Build
- Building and testing containers with a build configuration file and Cloud Build

**Skills**

- gcp
- cloud-build
- cloud-shell
- cloud-shell-editor
- containers
- docker
- dockerfile

### Confirm that needed APIs are enabled

- Cloud Build
- Container Registry

### Building containers with a Dockerfile and Cloud Build

```bash
# Create file
touch quickstart.sh
```

```shell
#!/bin/sh
echo "Hello, world! The time is $(date)."
```

- Dockerfile

```Dockerfile
FROM alpine
COPY quickstart.sh /
CMD ["/quickstart.sh"]
```

```bash
chmod +x quickstart.sh
```

- In Cloud Shell, run the following command to build the Docker container image in Cloud Build:

```bash
gcloud builds submit --tag gcr.io/${GOOGLE_CLOUD_PROJECT}/quickstart-image .
```

- In the Google Cloud Console, on the Navigation menu (Navigation menu icon), click Container Registry > Images.

### Building containers with a build configuration file and Cloud Build

- In Cloud Shell, enter the following command to clone the repository to the lab Cloud Shell:

```bash
git clone https://github.com/GoogleCloudPlatform/training-data-analyst

# Create a soft link as a shortcut to the working directory:
ln -s ~/training-data-analyst/courses/ak8s/v1.1 ~/ak8s

# Change to the directory that contains the sample files for this lab:
cd ~/ak8s/Cloud_Build/a
cat cloudbuild.yaml

# In Cloud Shell, execute the following command to start a Cloud Build using cloudbuild.yaml as the build configuration file:
gcloud builds submit --config cloudbuild.yaml .
```

- Container Registry > Images and then click quickstart-image.
- Navigation menu (Navigation menu icon), click Cloud Build > History.

### Building and testing containers with a build configuration file and Cloud Build

- In Cloud Shell, change to the directory that contains the sample files for this lab:

```bash
cd ~/ak8s/Cloud_Build/b

# In Cloud Shell, execute the following command to view the contents of cloudbuild.yaml:
cat cloudbuild.yaml

# In Cloud Shell, execute the following command to start a Cloud Build using cloudbuild.yaml as the build configuration file:
gcloud builds submit --config cloudbuild.yaml .

# Confirm that your command shell knows that the build failed:
echo $?
```
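The two cloudbuild.yaml files are only cat'ed above, not reproduced. As a rough sketch only (the actual files in the training repo may differ, and the second variant is intentionally set up so the build fails, as checked with echo $? above), a minimal configuration that builds and pushes quickstart-image looks like:

```yaml
# Minimal Cloud Build config sketch: build the image from the local Dockerfile,
# then push it to Container Registry when the build succeeds.
steps:
- name: 'gcr.io/cloud-builders/docker'
  args: ['build', '-t', 'gcr.io/$PROJECT_ID/quickstart-image', '.']
images:
- 'gcr.io/$PROJECT_ID/quickstart-image'
```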
================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_020_deploying_google_kubernetes_engine/ReadMe.md ================================================

# Deploying Google Kubernetes Engine

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - Getting Started with Google Kubernetes Engine Course](https://www.cloudskillsboost.google)

**High Level Objectives**

- Use the Google Cloud Console to build and manipulate GKE clusters
- Use the Google Cloud Console to deploy a Pod

**Skills**

- GKE
- Kubernetes
- Pods
- Modify Cluster

### Deploy GKE clusters

- cluster name to standard-cluster-1
- zone to us-central1-a

### Modify GKE clusters

- Change the number of nodes from 3 to 4 and click RESIZE

### View details about workloads in the Google Cloud Console

- In the Google Cloud Console, on the Kubernetes Engine > Workloads page, click nginx-1.

================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_021_creating_google_kubernetes_engine_deployments/ReadMe.md ================================================

# Creating Google Kubernetes Engine Deployments

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - Getting Started with Google Kubernetes Engine Course](https://www.cloudskillsboost.google)

**High Level Objectives**

- Create deployment manifests, deploy to cluster
- Trigger manual scaling up and down of Pods in deployments
- Trigger deployment rollout (rolling update to new version) and rollbacks
- Perform a Canary deployment

**Skills**

- gcp
- kubernetes
- deployments
- pods
- scaling
- rollouts
- rollbacks
- canary

### Create deployment manifests and deploy to the cluster

- Connect

```bash
# set the environment variable for the zone and cluster name
export my_zone=us-central1-a
export my_cluster=standard-cluster-1

# Configure kubectl tab completion in Cloud Shell:
source <(kubectl completion bash)

# configure access to your cluster for the kubectl command-line tool, using the following command:
gcloud container clusters get-credentials $my_cluster --zone $my_zone

# In Cloud Shell enter the following command to clone the repository to the lab Cloud Shell:
git clone https://github.com/GoogleCloudPlatform/training-data-analyst

# Create a soft link as a shortcut to the working directory:
ln -s ~/training-data-analyst/courses/ak8s/v1.1 ~/ak8s

# Change to the directory that contains the sample files for this lab:
cd ~/ak8s/Deployments/
```

- Create a deployment manifest
- nginx-deployment.yaml

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
```

- Apply

```bash
kubectl apply -f ./nginx-deployment.yaml
kubectl get deployments
```

### Manually scale up and down the number of Pods in deployments

- Navigation menu (Navigation menu icon), click Kubernetes Engine > Workloads.
- Click nginx-deployment (your deployment) to open the Deployment details page.
- ACTIONS > Scale > Edit Replicas.
```bash kubectl get deployments # To scale the Pod back up to three replicas, execute the following command: kubectl scale --replicas=3 deployment nginx-deployment # View kubectl get deployments ``` ### Trigger a deployment rollout and a deployment rollback - To update the version of nginx in the deployment, execute the following command: ```bash kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 --record # To view the rollout status, execute the following command: kubectl rollout status deployment.v1.apps/nginx-deployment # To verify the change, get the list of deployments: kubectl get deployments # View the rollout history of the deployment: kubectl rollout history deployment nginx-deployment ``` - Trigger a deployment rollback ```bash # To roll back to the previous version of the nginx deployment, execute the following command: kubectl rollout undo deployments nginx-deployment # View the updated rollout history of the deployment: kubectl rollout history deployment nginx-deployment # View the details of the latest deployment revision: kubectl rollout history deployment/nginx-deployment --revision=3 ``` ### Define the service type in the manifest - Define service types in the manifest - service-nginx.yaml ```yaml apiVersion: v1 kind: Service metadata: name: nginx spec: type: LoadBalancer selector: app: nginx ports: - protocol: TCP port: 60000 targetPort: 80 ``` - In the Cloud Shell, to deploy your manifest, execute the following command: ```bash kubectl apply -f ./service-nginx.yaml # To view the details of the nginx service, execute the following command: kubectl get service nginx ``` - When the external IP appears, open http://[EXTERNAL_IP]:60000/ in a new browser tab to see the server being served through network load balancing. ### Perform a canary deployment - The manifest file nginx-canary.yaml that is provided for you deploys a single pod running a newer version of nginx than your main deployment. In this task, you create a canary deployment using this new deployment file: - nginx-canary.yaml ```yaml apiVersion: apps/v1 kind: Deployment metadata: name: nginx-canary labels: app: nginx spec: replicas: 1 selector: matchLabels: app: nginx template: metadata: labels: app: nginx track: canary Version: 1.9.1 spec: containers: - name: nginx image: nginx:1.9.1 ports: - containerPort: 80 ``` ```bash # Create the canary deployment based on the configuration file: kubectl apply -f nginx-canary.yaml # When the deployment is complete, verify that both the nginx and the nginx-canary deployments are present: kubectl get deployments # Switch back to the Cloud Shell and scale down the primary deployment to 0 replicas: kubectl scale --replicas=0 deployment nginx-deployment # Verify that the only running replica is now the Canary deployment: kubectl get deployments ``` - Session affinity - This potential to switch between different versions may cause problems if there are significant changes in functionality in the canary release. To prevent this you can set the sessionAffinity field to ClientIP in the specification of the service if you need a client's first request to determine which Pod will be used for all subsequent connections. 
```yaml apiVersion: v1 kind: Service metadata: name: nginx spec: type: LoadBalancer sessionAffinity: ClientIP selector: app: nginx ports: - protocol: TCP port: 60000 targetPort: 80 ``` ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_021_creating_google_kubernetes_engine_deployments/nginx-canary.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: nginx-canary labels: app: nginx spec: replicas: 1 selector: matchLabels: app: nginx template: metadata: labels: app: nginx track: canary Version: 1.9.1 spec: containers: - name: nginx image: nginx:1.9.1 ports: - containerPort: 80 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_021_creating_google_kubernetes_engine_deployments/nginx-deployment.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: nginx-deployment labels: app: nginx spec: replicas: 3 selector: matchLabels: app: nginx template: metadata: labels: app: nginx spec: containers: - name: nginx image: nginx:1.7.9 ports: - containerPort: 80 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_021_creating_google_kubernetes_engine_deployments/nginx-svc-session-affinity.yaml ================================================ apiVersion: v1 kind: Service metadata: name: nginx spec: type: LoadBalancer sessionAffinity: ClientIP selector: app: nginx ports: - protocol: TCP port: 60000 targetPort: 80 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_021_creating_google_kubernetes_engine_deployments/service-nginx.yaml ================================================ apiVersion: v1 kind: Service metadata: name: nginx spec: type: LoadBalancer selector: app: nginx ports: - protocol: TCP port: 60000 targetPort: 80 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_022_configuring_persistent_storage_for_google_kubernetes_engine/ReadMe.md ================================================ # Configuring Persistent Storage for Google Kubernetes Engine [https://www.cloudskillsboost.google](https://www.cloudskillsboost.google) [Select - Getting Started with Google Kubernetes Engine Course](https://www.cloudskillsboost.google) **High Level Objectives** - Create manifests for PersistentVolumeClaims (PVCs) for Google Cloud persistent disks (dynamically created or existing) - Mount Google Cloud persistent disk PVCs as volumes in Pods - Use manifests to create StatefulSets - Mount Google Cloud persistent disk PVCs as volumes in StatefulSets - Verify the connection of Pods in StatefulSets to particular PVs as the Pods are stopped and restarted **Skills** - gcp - kubernetes - persistent storage - persistent volumes - persistent volume claims - statefulsets ### Create PVCs ```bash # Connect to the lab GKE cluster export my_zone=us-central1-a export my_cluster=standard-cluster-1 # Configure tab completion for the kubectl command-line tool: source <(kubectl completion bash) # Configure access to your cluster for kubectl: gcloud container clusters get-credentials $my_cluster --zone $my_zone ``` - In Cloud Shell, enter the following command to clone the repository to the lab Cloud Shell: ```bash git clone https://github.com/GoogleCloudPlatform/training-data-analyst # Create a soft link as a shortcut to the 
working directory: ln -s ~/training-data-analyst/courses/ak8s/v1.1 ~/ak8s # Change to the directory that contains the sample files for this lab: cd ~/ak8s/Storage/ # To show that you currently have no PVCs, execute the following command: kubectl get persistentvolumeclaim # To create the PVC, execute the following command: kubectl apply -f pvc-demo.yaml # To show your newly created PVC, execute the following command: kubectl get persistentvolumeclaim ``` ### Mount and verify Google Cloud persistent disk PVCs in Pods - To create the Pod with the volume, execute the following command: ```bash kubectl apply -f pod-volume-demo.yaml kubectl get pods # To verify the PVC is accessible within the Pod, you must gain shell access to your Pod. To start the shell session, execute the following command: kubectl exec -it pvc-demo-pod -- sh # To create a simple text message as a web page in the Pod enter the following commands: echo Test webpage in a persistent volume!>/var/www/html/index.html chmod +x /var/www/html/index.html # Verify the text file contains your message: cat /var/www/html/index.html exit ``` - Test the persistence of the PV ```bash # Delete the pvc-demo-pod: kubectl delete pod pvc-demo-pod kubectl get pods # To show your PVC, execute the following command: kubectl get persistentvolumeclaim # Redeploy the pvc-demo-pod: kubectl apply -f pod-volume-demo.yaml kubectl get pods kubectl exec -it pvc-demo-pod -- sh # To verify that the text file still contains your message execute the following command: cat /var/www/html/index.html ``` ### Create StatefulSets with PVCs - Before you can use the PVC with the statefulset, you must delete the Pod that is currently using it. Execute the following command to delete the Pod: ```bash kubectl delete pod pvc-demo-pod ``` - To create the StatefulSet with the volume, execute the following command: ```bash kubectl apply -f statefulset-demo.yaml # Use "kubectl describe" to view the details of the StatefulSet: kubectl describe statefulset statefulset-demo # List the Pods in the cluster: kubectl get pods # To list the PVCs, execute the following command: kubectl get pvc # Use "kubectl describe" to view the details of the first PVC in the StatefulSet: kubectl describe pvc hello-web-disk-statefulset-demo-0 ``` ### Verify the persistence of Persistent Volume connections to Pods managed by StatefulSets - To verify that the PVC is accessible within the Pod, you must gain shell access to your Pod. 
To start the shell session, execute the following command: ```bash kubectl exec -it statefulset-demo-0 -- sh # Verify that there is no index.html text file in the /var/www/html directory: cat /var/www/html/index.html # To create a simple text message as a web page in the Pod enter the following commands: echo Test webpage in a persistent volume!>/var/www/html/index.html chmod +x /var/www/html/index.html # Verify the text file contains your message: cat /var/www/html/index.html exit # Delete the Pod where you updated the file on the PVC: kubectl delete pod statefulset-demo-0 kubectl get pods kubectl exec -it statefulset-demo-0 -- sh # Verify that the text file still contains your message: cat /var/www/html/index.html ``` ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_022_configuring_persistent_storage_for_google_kubernetes_engine/pod-volume-demo.yaml ================================================ kind: Pod apiVersion: v1 metadata: name: pvc-demo-pod spec: containers: - name: frontend image: nginx volumeMounts: - mountPath: "/var/www/html" name: pvc-demo-volume volumes: - name: pvc-demo-volume persistentVolumeClaim: claimName: hello-web-disk ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_022_configuring_persistent_storage_for_google_kubernetes_engine/pvc-demo.yaml ================================================ apiVersion: v1 kind: PersistentVolumeClaim metadata: name: hello-web-disk spec: accessModes: - ReadWriteOnce resources: requests: storage: 30Gi ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_022_configuring_persistent_storage_for_google_kubernetes_engine/statefulset-demo.yaml ================================================ kind: Service apiVersion: v1 metadata: name: statefulset-demo-service spec: ports: - protocol: TCP port: 80 targetPort: 9376 type: LoadBalancer --- apiVersion: apps/v1 kind: StatefulSet metadata: name: statefulset-demo spec: selector: matchLabels: app: MyApp serviceName: statefulset-demo-service replicas: 3 updateStrategy: type: RollingUpdate template: metadata: labels: app: MyApp spec: containers: - name: stateful-set-container image: nginx ports: - containerPort: 80 name: http volumeMounts: - name: hello-web-disk mountPath: "/var/www/html" volumeClaimTemplates: - metadata: name: hello-web-disk spec: accessModes: [ "ReadWriteOnce" ] resources: requests: storage: 30Gi ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_023_anthos_service_mesh_walkthrough/ReadMe.md ================================================ # Anthos Service Mesh Walkthrough [https://www.cloudskillsboost.google](https://www.cloudskillsboost.google) [Select - Cloud Operations and Service Mesh with Anthos Course](https://www.cloudskillsboost.google) **High Level Objectives** - Navigate Google Cloud and explore Anthos clusters from the Google Cloud Console. - Perform east-west cross-cluster traffic routing. - Observe distributed services, view metrics, set up SLOs, and investigate network topology. - Use the Anthos Service Mesh Dashboards to verify security and encryption configuration. **Skills** - gcp - gke - kubernetes - istio - service mesh - anthos - SLOs - network topology - service mesh dashboard - cross-cluster traffic routing Anthos Service Mesh (ASM) on Google Kubernetes Engine. 
Anthos Service Mesh is a managed service based on Istio, the leading open source service mesh.

### Intro

Two GKE clusters called gke-central and gke-west have been provisioned in us-central1 and us-west2. Anthos Service Mesh has been configured across these clusters to provide cross-cluster service discoverability and secure routing so that a microservice pod running on gke-central can seamlessly communicate with a pod on gke-west. Additionally, the Bank of Anthos application has been deployed across these two clusters as shown in the following diagram.

![img.png](.images/arch-img.png)

### Explore the app deployed in Anthos clusters

- On the Navigation menu, click Kubernetes Engine > Clusters, and verify that two Anthos clusters are registered.
- To open the websites, click on the IP addresses for each of the instances of istio-ingressgateway. This will open the sign-in page for each website.
- Deposit funds or send a payment, which will create a new transaction on the shared database.
- Refresh the pages and confirm that both Transaction History and Current Balance are the same across clusters.

> Notice that even though all services are replicated across clusters, they are both using the same database because the ledger-db is only deployed on one cluster. Anthos Service Mesh routes requests to the available pods regardless of the cluster you are ingressing from. This is called east-west routing.

### Force cross-cluster traffic routing

- To open the dashboard, click on the frontend deployment in the gke-central cluster.
- On the Actions dropdown, select Scale > Edit replicas.
- Enter 0 replicas, and click Scale.
- Return to the istio-ingressgateway IP addresses that you opened before. The application should continue to work.
- Return to your frontend deployment and scale it back to 1 replica.

### Observe distributed services

- To return to the home page of the Anthos Service Mesh dashboard, on the Navigation menu, click Anthos > Service Mesh.

![img.png](.images/topology.png)

- Click on the frontend workload node, and note the services called by that workload.
- Click Table view. A list of distributed services in your mesh across your two clusters is displayed.
- Click Health
- Click Create SLO.
- Select Latency as the metric, and click Continue.
- Set the latency threshold to 350 ms, and click Continue.
- Set the Period length to Calendar day, and set the Goal to 99.5%.

### Verify service mesh security

- From the frontend service ASM Dashboard, open the Security (BETA) tab. The following diagram is displayed:

![img.png](.images/security.png)

- Notice that all service-to-service communication has a green lock. That's because all the communication is encrypted over mutual TLS. Also, notice that an unknown source has an open red lock. That means that an unauthenticated agent is accessing the frontend service over plain text. This unauthenticated agent is the browser.

================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_024_observing_anthos_services/ReadMe.md ================================================

# Observing Anthos Services

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - Cloud Operations and Service Mesh with Anthos Course](https://www.cloudskillsboost.google)

**High Level Objectives**

- Install Anthos Service Mesh, with tracing enabled and configured to use Cloud Trace as the backend.
- Deploy Bookinfo, an Istio-enabled multi-service application.
- Enable external access using an Istio Ingress Gateway.
- Use the Bookinfo application.
- Evaluate service performance using Cloud Trace features within Google Cloud.
- Create and monitor service-level objectives (SLOs).
- Leverage the Anthos Service Mesh Dashboard to understand service performance.

**Skills**

- gcp
- gke
- kubernetes
- istio
- service mesh
- anthos
- SLOs
- network topology
- microservices
- service performance
- Istio ingress gateway
- cloud trace

Anthos Service Mesh (ASM) on Google Kubernetes Engine. Anthos Service Mesh is a managed service based on Istio, the leading open source service mesh.

### Install Anthos Service Mesh with tracing enabled

- Set ENV

```bash
CLUSTER_NAME=gke
CLUSTER_ZONE=us-central1-b
PROJECT_ID=$(gcloud config get-value project)
PROJECT_NUMBER=$(gcloud projects describe ${PROJECT_ID} \
--format="value(projectNumber)")
FLEET_PROJECT_ID=${FLEET_PROJECT_ID:-$PROJECT_ID}
IDNS=${PROJECT_ID}.svc.id.goog
DIR_PATH=.
```

- Configure kubectl to manage your GKE cluster:

```bash
gcloud container clusters get-credentials $CLUSTER_NAME \
--zone $CLUSTER_ZONE --project $PROJECT_ID

# Review your kubectl configuration:
kubectl config view

# Check that your cluster is running:
gcloud container clusters list
```

- Install Anthos Service Mesh

```bash
# Download the Anthos Service Mesh installation script:
curl https://storage.googleapis.com/csm-artifacts/asm/asmcli_1.15 > asmcli
chmod +x asmcli

# Use asmcli to install Anthos Service Mesh:
./asmcli install \
--project_id $PROJECT_ID \
--cluster_name $CLUSTER_NAME \
--cluster_location $CLUSTER_ZONE \
--fleet_id $FLEET_PROJECT_ID \
--output_dir $DIR_PATH \
--managed \
--enable_all \
--ca mesh_ca
```

- Enable Anthos Service Mesh to send telemetry to Cloud Trace.
- In the Google Cloud Console, on the Navigation menu, click Trace > Trace List.

### Deploy a canary release that has high latency

```bash
# In Cloud Shell, clone the repository that has the configuration files you need for this part of the lab:
git clone https://github.com/GoogleCloudPlatform/istio-samples.git \
~/istio-samples

# Create the new resources on the gke cluster:
kubectl apply -f ~/istio-samples/istio-canary-gke/canary/destinationrule.yaml
kubectl apply -f ~/istio-samples/istio-canary-gke/canary/productcatalog-v2.yaml
kubectl apply -f ~/istio-samples/istio-canary-gke/canary/vs-split-traffic.yaml
```

### Define your service level objective

- Navigation menu, click Anthos
- In the Services list, click productcatalogservice -> Health
- Click Create SLO.
- In the Set your SLI slideout, for metric, select Latency.
- Select Request-based as the method of evaluation.
- Click Continue.
- Set Latency Threshold to 1000, and click Continue.
- Set Period type to Calendar.
- Set Period length to Calendar day.
- Set the Performance goal to 99.5%.

### Diagnose the problem

- Click on your SLO entry in the SLO list.
- From the Breakdown By dropdown, select Source service.
- Use Cloud Trace to better understand where the delay is
- In the Google Cloud Console, on the Navigation menu, click Trace > Trace List.
- Click on a dot that charts at around 3000ms; it should represent one of the requests to the product catalog service.
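For reference only: an SLO like the one configured above (request-based latency, 1000 ms threshold, calendar-day period, 99.5% goal) corresponds to a ServiceLevelObjective resource in the Service Monitoring API. A rough sketch of the payload follows; the field names follow the v3 API, and the filter string is a placeholder, not taken from the lab:

```json
{
  "displayName": "99.5% - Latency - Calendar day",
  "goal": 0.995,
  "calendarPeriod": "DAY",
  "serviceLevelIndicator": {
    "requestBased": {
      "distributionCut": {
        "distributionFilter": "PLACEHOLDER_LATENCY_DISTRIBUTION_METRIC_FILTER",
        "range": { "max": 1000 }
      }
    }
  }
}
```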
### Roll back the release and verify an improvement

- In Cloud Shell, back out the canary release:

```bash
kubectl delete -f ~/istio-samples/istio-canary-gke/canary/destinationrule.yaml
kubectl delete -f ~/istio-samples/istio-canary-gke/canary/productcatalog-v2.yaml
kubectl delete -f ~/istio-samples/istio-canary-gke/canary/vs-split-traffic.yaml
```

- Click on productcatalogservice, and then in the menu pane, click Health.
- Compare the current compliance metric with the one you saw earlier. It should be higher now, reflecting the fact that you are no longer seeing high-latency requests.

### Visualize your mesh with the Anthos Service Mesh dashboard

- On the Navigation menu, click Anthos > Service Mesh.
- Click Topology. A chart representing your service mesh is displayed.

================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_025_managing_traffic_with_anthos_service_mesh/ReadMe.md ================================================

# Managing Traffic Flow with Anthos Service Mesh

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - Cloud Operations and Service Mesh with Anthos Course](https://www.cloudskillsboost.google)

**High Level Objectives**

- Configure and use Istio Gateways
- Apply default destination rules, for all available versions
- Apply virtual services to route by default to only one version
- Route to a specific version of a service based on user identity
- Shift traffic gradually from one version of a microservice to another
- Use the Anthos Service Mesh dashboard to view routing to multiple versions
- Set up networking best practices such as retries, circuit breakers, and timeouts

**Skills**

- gcp
- gke
- kubernetes
- istio
- service mesh
- anthos
- destination rules
- virtual services
- traffic management
- retries
- circuit breakers
- timeouts

Anthos Service Mesh’s traffic management model relies on the following two components:

- Control plane: manages and configures the Envoy proxies to route traffic and enforce policies.
- Data plane: encompasses all network communication between microservices performed at runtime by the Envoy proxies.

![img.png](.images/img.png)

## Review Traffic Management use cases

> In Istio, when an incoming request arrives at a Kubernetes cluster, it first reaches the Gateway resource,
> and then the VirtualService resource. The Gateway resource receives the incoming traffic and is responsible
> for routing the traffic to the correct VirtualService based on the specified rules. The VirtualService resource
> then applies additional routing rules to further direct the traffic to the appropriate destination service or pod.
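To make the use cases listed below concrete, here is a minimal VirtualService sketch that combines header-based conditional routing with traffic splitting; the service host and subset names are hypothetical, not from the lab:

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: my-service        # hypothetical service name
spec:
  hosts:
  - my-service
  http:
  - match:                # conditional routing: requests with this header go to v2
    - headers:
        end-user:
          exact: jason
    route:
    - destination:
        host: my-service
        subset: v2
  - route:                # default: split remaining traffic 90/10 between v1 and v2
    - destination:
        host: my-service
        subset: v1
      weight: 90
    - destination:
        host: my-service
        subset: v2
      weight: 10
```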
- Example: traffic splitting
- Example: timeouts
- Example: retries
- Example: fault injection: inserting delays
- Example: fault injection: inserting aborts
- Example: conditional routing: based on request headers

## Setup

```bash
# In Cloud Shell, set environment variables for the zone and cluster name:
export CLUSTER_NAME=gke
export CLUSTER_ZONE=us-central1-b

# Configure kubectl command line access by running:
export GCLOUD_PROJECT=$(gcloud config get-value project)
gcloud container clusters get-credentials $CLUSTER_NAME \
--zone $CLUSTER_ZONE --project $GCLOUD_PROJECT

# Check that your cluster is up and running:
gcloud container clusters list

# Ensure the Kubernetes pods for the Anthos Service Mesh control plane are deployed:
kubectl get pods -n istio-system

# Ensure corresponding Kubernetes services for the Anthos Service Mesh control plane are deployed:
kubectl get service -n istio-system

# Ensure corresponding Kubernetes pods for the Anthos Service Mesh control plane are deployed, so that telemetry data is displayed in the ASM Dashboard:
kubectl get pods -n asm-system
```

- Verify the Bookinfo deployment

```bash
kubectl get pods

# Review running application services:
kubectl get services
```

## Install Gateways to enable ingress

In a Kubernetes environment, the Kubernetes Ingress Resource is used to specify services that should be exposed outside the cluster. In Anthos Service Mesh, a better approach, which also works in Kubernetes and other environments, is to use a Gateway resource. A Gateway allows mesh features such as monitoring, mTLS, and advanced routing rules to be applied to traffic entering the cluster.

![img.png](.images/img2.png)

- Install an ingress gateway in your cluster

```bash
kubectl create namespace ingress

# Label the gateway namespace with a revision label for auto-injection:
kubectl label namespace ingress \
istio.io/rev=$(kubectl -n istio-system get pods -l app=istiod -o json | jq -r '.items[0].metadata.labels["istio.io/rev"]') \
--overwrite
```

- Download and apply the gateway configuration files.
- These include the pods and services that will first receive the incoming requests from outside the cluster:

```bash
git clone https://github.com/GoogleCloudPlatform/anthos-service-mesh-packages
kubectl apply -n ingress -f anthos-service-mesh-packages/samples/gateways/istio-ingressgateway
```

- After you create the deployment, verify that the new services are working:

```bash
kubectl get pod,service -n ingress

# Notice the resource is a LoadBalancer. This ingress gateway uses an external TCP load balancer in GCP.
```

- Deploy the Gateway to specify the port and protocol to be used. In this case, the gateway enables HTTP traffic over port 80:
- The Gateway resource must be located in the same namespace as the gateway deployment.

```yaml
cat <<EOF | kubectl apply -n ingress -f -
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: bookinfo-gateway
spec:
  selector:
    istio: ingressgateway
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "*"
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: bookinfo
spec:
  hosts:
  - "*"
  gateways:
  - bookinfo-gateway
  http:
  - match:
    - uri:
        exact: /productpage
    - uri:
        prefix: /static
    - uri:
        exact: /login
    - uri:
        exact: /logout
    - uri:
        prefix: /api/v1/products
    route:
    - destination:
        host: productpage
        port:
          number: 9080
EOF
```

> In Istio, when an incoming request arrives at a Kubernetes cluster, it first reaches the Gateway resource,
> and then the VirtualService resource. The Gateway resource receives the incoming traffic and is responsible
> for routing the traffic to the correct VirtualService based on the specified rules. The VirtualService resource
> then applies additional routing rules to further direct the traffic to the appropriate destination service or pod.
> So in the example manifest, the incoming traffic will first reach the bookinfo-gateway Gateway resource and then
> it will be directed to the bookinfo VirtualService based on the specified matching rules. The VirtualService
> will then route the traffic to the appropriate destination based on the defined routing rules.
- Verify that the Gateway and VirtualService have been created and notice that the VirtualService is pointing to the Gateway:

```bash
kubectl get gateway,virtualservice

# Save this external IP in your Cloud Shell environment:
export GATEWAY_URL=$(kubectl get svc -n ingress istio-ingressgateway \
-o=jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo The gateway address is $GATEWAY_URL
```

- Generate some background traffic

```bash
sudo apt install siege

# Use siege to create traffic against your services:
siege http://${GATEWAY_URL}/productpage
```

- Access the BookInfo application

```bash
# Initialize the new Cloud Shell tab:
export CLUSTER_NAME=gke
export CLUSTER_ZONE=us-central1-b
export GCLOUD_PROJECT=$(gcloud config get-value project)
gcloud container clusters get-credentials $CLUSTER_NAME \
--zone $CLUSTER_ZONE --project $GCLOUD_PROJECT
export GATEWAY_URL=$(kubectl get svc istio-ingressgateway \
-o=jsonpath='{.status.loadBalancer.ingress[0].ip}' -n ingress)

# Confirm that the Bookinfo application responds by sending a curl request to it from some pod, within the cluster, for example from ratings:
kubectl exec -it \
$(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') \
-c ratings -- curl productpage:9080/productpage \
| grep -o "<title>.*</title>"

# Check that the Bookinfo app responds to a curl request sent to it from outside the cluster, using the external IP saved earlier:
curl -I http://${GATEWAY_URL}/productpage

# Open the Bookinfo application in your browser. Run this command in the Cloud Shell to get the full URL:
echo http://${GATEWAY_URL}/productpage
```

## Use the Anthos Service Mesh dashboard to view routing to multiple versions

- Navigation > Anthos > Service Mesh.
- Click on the productpage service, then select Connected Services on the left.
- Select the OUTBOUND tab and note the two services called by the productpage pods.
- Click on the reviews service.
- Note the service statistics, then select the Infrastructure link on the left-hand menu.
- You can see that there are multiple pods, running different versions of the reviews logic, that receive traffic sent to the reviews service.
- Click on Traffic in the left-hand menu to see another view of traffic distribution.
- You can see that there is relatively even distribution of traffic across the three backend pods running the different versions of the application logic.
- Click on the Anthos Service Mesh logo in the upper left corner to return to the main dashboard page.
- Click on the TOPOLOGY link in the upper-right corner
- Rearrange the mesh graph so that you can easily view:
  - The productpage service going to the productpage deployment
  - The productpage deployment going to the reviews service
  - The reviews service going to three versions of reviews

## Apply default destination rules, for all available versions

- Review the configuration found in [Github](https://github.com/istio/istio/blob/master/samples/bookinfo/networking/destination-rule-all.yaml). This configuration defines 4 DestinationRule resources, 1 for each service.

DestinationRule is an Istio resource that is used to configure policy rules for network traffic between Kubernetes services. It works together with VirtualServices: a VirtualService matches and routes requests based on criteria such as headers or URL paths, while the DestinationRule defines the named subsets (versions) of a service that traffic can be routed to, along with per-subset policies.
Here are some common use cases for DestinationRules:

- Traffic splitting: DestinationRule defines the named subsets (versions) of a service; a VirtualService can then distribute traffic across those subsets based on the percentage of traffic you want to send to each version.
- Fault injection: the subsets defined by a DestinationRule let you target injected errors or delays (configured in a VirtualService) at a specific version of a service for testing purposes.
- Circuit breaking: you can configure DestinationRule to apply circuit-breaking rules to prevent cascading failures in your service mesh.
- Traffic shaping: DestinationRule can be used to limit the amount of traffic that can be sent to a particular version of a service or to specific instances of a service.
- Security: you can use DestinationRule to enforce mutual TLS authentication and other security policies for traffic between services.

DestinationRule is a powerful tool for controlling how traffic flows between services in your Kubernetes cluster, and can help you improve the reliability, performance, and security of your applications.

- Apply the configuration with the following command in Cloud Shell:

```bash
kubectl apply -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/networking/destination-rule-all.yaml

# Check that 4 DestinationRule resources were defined:
kubectl get destinationrules

# Review the details of the destination rules:
kubectl get destinationrules -o yaml
```

- Wait for 1-2 minutes, then return to the Anthos Service Mesh dashboard.
- Look in both the table and topology views and confirm that the traffic continues to be evenly distributed across the three backend versions.

## Apply virtual services to route by default to only one version

- Review the configuration found in [Github](https://github.com/istio/istio/blob/master/samples/bookinfo/networking/virtual-service-all-v1.yaml). This configuration defines 4 VirtualService resources, 1 for each service.
- Apply the configuration with the following command in Cloud Shell:

```bash
kubectl apply -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/networking/virtual-service-all-v1.yaml

# Check that 4 routes, VirtualService resources, were defined:
kubectl get virtualservices

# In Cloud Shell, get the external IP address of the ingress gateway:
echo $GATEWAY_URL
```

- Open the Bookinfo site in your browser. The URL is http://[GATEWAY_URL]/productpage, where GATEWAY_URL is the External IP address of the ingress.
- Notice that the Book Reviews part of the page displays with no rating stars, no matter how many times you refresh. This is because you configured the mesh to route all traffic for the reviews service to the version reviews:v1, and this version of the service does not access the star ratings service.
- Wait for 1-2 minutes, then return to the Anthos Service Mesh dashboard by selecting Navigation > Anthos > Service Mesh > reviews > Infrastructure.
- Select SHOW TIMELINE and focus the chart on the last 5 minutes of traffic. You should see that the traffic goes from being evenly distributed to being routed to the version 1 workload 100% of the time.
- You can also see the new traffic distribution by looking at the Traffic tab or the topology view - though these both take a couple extra minutes before the data is shown.

## Route to a specific version of a service based on user identity

- Review the configuration found in [Github](https://github.com/istio/istio/blob/master/samples/bookinfo/networking/virtual-service-reviews-test-v2.yaml). This configuration defines 1 VirtualService resource.
- Apply the configuration with the following command in Cloud Shell:

```bash
kubectl apply -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/networking/virtual-service-reviews-test-v2.yaml

# Confirm the rule is created:
kubectl get virtualservice reviews
```

- Browse again to /productpage of the Bookinfo application.
- This time, click Sign in, and use a User Name of jason with no password.
- Notice the UI shows stars from the rating service.
- To better visualize the effect of the new traffic routing, you can create a new background load of authenticated requests to the service
- Start a new siege session, generating only 20% of the traffic of the first, but with all requests being authenticated as jason:

```bash
curl -c cookies.txt -F "username=jason" -L -X \
POST http://$GATEWAY_URL/login
cookie_info=$(grep -Eo "session.*" ./cookies.txt)
cookie_name=$(echo $cookie_info | cut -d' ' -f1)
cookie_value=$(echo $cookie_info | cut -d' ' -f2)
siege -c 5 http://$GATEWAY_URL/productpage \
--header "Cookie: $cookie_name=$cookie_value"
```

- Wait for 1-2 minutes, refresh the page showing the Infrastructure telemetry, adjust the timeline to show the current time, and then check in the Anthos Dashboard; you should see that roughly 85% of requests over the last few minutes have gone to version 1 because they are unauthenticated. About 15% have gone to version 2 because they are made as jason.
- In Cloud Shell, cancel the siege session by typing Ctrl+c.
- Clean up from this task by removing the application virtual services:

```bash
kubectl delete -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/networking/virtual-service-all-v1.yaml
```

- You can wait for 1-2 minutes, refresh the Anthos Service Mesh dashboard, adjust the timeline to show the current time, and confirm that traffic is once again evenly balanced across versions.

## Shift traffic gradually from one version of a microservice to another

- In Cloud Shell, route all traffic to the v1 version of each service:

```bash
kubectl apply -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/networking/virtual-service-all-v1.yaml
```

- Browse again to /productpage of the Bookinfo application and confirm that you do not see stars with reviews
- Wait 1 minute, then refresh the Anthos Service Mesh dashboard, adjust the timeline to show the current time, and confirm that all traffic has been routed to the v1 backend.
- Transfer 50% of the traffic from reviews:v1 to reviews:v3:

```bash
kubectl apply -f \
https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/networking/virtual-service-reviews-50-v3.yaml
```

- Browse again to /productpage of the Bookinfo application.
- Notice a roughly even distribution of reviews with no stars, from v1, and reviews with red stars, from v3, which accesses the ratings service.
- Wait 1 minute, then refresh the page, adjust the timeline to show the current time, and confirm in the Anthos Service Mesh dashboard that traffic to the reviews service is split 50/50 between v1 and v3.
- Transfer the remaining 50% of traffic to reviews:v3.
- Assuming you decide that the reviews:v3 service is stable, route 100% of the traffic to reviews:v3 by applying this virtual service:

```bash
kubectl apply -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/networking/virtual-service-reviews-v3.yaml
```

- Test the new routing configuration using the Bookinfo UI.
- Browse again to /productpage of the Bookinfo application.
- Refresh the /productpage; you will always see book reviews with red-colored star ratings for each review.
- Wait 1 minute, refresh the page, then confirm in the Anthos Service Mesh dashboard that all traffic to the reviews service is sent to v3.
- Clean up from this exercise by removing the application virtual services.

```bash
kubectl delete -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/networking/virtual-service-all-v1.yaml
```

## Add timeouts to avoid waiting indefinitely for service replies

- In Cloud Shell, route all traffic to the v1 version of each service:

```bash
kubectl apply -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/networking/virtual-service-all-v1.yaml
```

- Route requests to v2 of the reviews service, i.e., a version that calls the ratings service:

```bash
kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
  - reviews
  http:
  - route:
    - destination:
        host: reviews
        subset: v2
EOF
```

> We are auto-injecting the Istio sidecar proxy into the pods deployed in the mtls-client and mtls-service namespaces.
> The Istio sidecar proxy is a container that runs alongside the application container in the same pod,
> and it intercepts all inbound and outbound traffic to the pod. It is responsible for implementing the
> mTLS encryption between services in the Istio service mesh, and enforcing Istio policies such as traffic management,
> routing, and security.
> By auto-injecting the sidecar proxy, we can ensure that all traffic between services in the mtls-client
> and mtls-service namespaces is automatically encrypted and secured by Istio. This eliminates the need to
> modify the application code or configuration, and makes it easy to deploy and manage services in a secure and scalable manner.
> The labels added to the namespaces (istio.io/rev=${VERSION}) are used by Istio's automatic sidecar
> injection feature to determine which pods should have the sidecar proxy injected into them.
> The ${VERSION} variable is set based on the revision label of the istiod deployment in the istio-system namespace,
> and it ensures that the correct version of the Istio sidecar proxy is injected into the pods.
```bash
# get the revision label
export DEPLOYMENT=$(kubectl get deployments -n istio-system | grep istiod)
export VERSION=asm-$(echo $DEPLOYMENT | cut -d'-' -f 3)-$(echo $DEPLOYMENT \
| cut -d'-' -f 4 | cut -d' ' -f 1)

# enable auto-injection on the namespaces
kubectl label namespace mtls-client istio.io/rev=${VERSION} --overwrite
kubectl label namespace mtls-service istio.io/rev=${VERSION} --overwrite
```

- Deploy the services in the mtls-* namespaces:

```bash
kubectl apply -f \
https://raw.githubusercontent.com/istio/istio/release-1.6/samples/sleep/sleep.yaml \
-n mtls-client
kubectl apply -f \
https://raw.githubusercontent.com/istio/istio/release-1.6/samples/httpbin/httpbin.yaml \
-n mtls-service
```

- Verify that the sleep service is deployed in the client namespaces and that the httpbin service is deployed in the mtls-service and legacy-service namespaces:

```bash
kubectl get services --all-namespaces
```

- Verify that a sleep pod is running in the mtls-client and legacy-client namespaces and that an httpbin pod is running in the mtls-service and legacy-service namespaces:

```bash
kubectl get pods --all-namespaces
```

- Verify that the two sleep clients can communicate with the two httpbin services

```bash
for from in "mtls-client" "legacy-client"; do
  for to in "mtls-service" "legacy-service"; do
    kubectl exec $(kubectl get pod -l app=sleep -n ${from} -o jsonpath={.items..metadata.name}) -c sleep -n ${from} -- curl "http://httpbin.${to}:8000/ip" -s -o /dev/null -w "sleep.${from} to httpbin.${to}: %{http_code}\n"
  done
done

# Output
sleep.mtls-client to httpbin.mtls-service: 200
sleep.mtls-client to httpbin.legacy-service: 200
sleep.legacy-client to httpbin.mtls-service: 200
sleep.legacy-client to httpbin.legacy-service: 200
```

### Understand authentication and enable service-to-service authentication with mTLS

- In the console, go to Navigation Menu > Anthos > Service Mesh.
- Under the Namespace dropdown, select the mtls-service namespace and then click on the httpbin service located below.
- In the left side panel, go to Connected Services.
- Use your mouse to hover over the lock symbol in the Request port column, and verify that green means mTLS and red means plain text.
- Now check out the Security tab in the left side panel. It shows you that the httpbin service has received both plaintext and mTLS traffic.
- Test auto mutual TLS
- By default, Istio configures destination workloads in PERMISSIVE mode. When PERMISSIVE mode is enabled, a service can accept both plaintext and mTLS traffic. Requests that arrived over mTLS carry the X-Forwarded-Client-Cert header.
- Use the Cloud Shell to send a request from the sleep service in the mtls-client namespace to the httpbin service in the mtls-service namespace:

```bash
kubectl exec $(kubectl get pod -l app=sleep -n mtls-client -o jsonpath={.items..metadata.name}) -c sleep -n mtls-client -- curl http://httpbin.mtls-service:8000/headers -s | grep X-Forwarded-Client-Cert

# The traffic included the X-Forwarded-Client-Cert header and therefore was mutually authenticated and encrypted
```

- Now send a request from the sleep service in the mtls-client namespace to the httpbin service in the legacy-service namespace:

```bash
kubectl exec $(kubectl get pod -l app=sleep -n mtls-client -o jsonpath={.items..metadata.name}) -c sleep -n mtls-client -- curl http://httpbin.legacy-service:8000/headers -s | grep X-Forwarded-Client-Cert

# The X-Forwarded-Client-Cert header isn't present so the traffic was sent and received in plaintext.
```
- Finally, send a request from the sleep service in the legacy-client namespace to the httpbin service in the mtls-service namespace:

```bash
kubectl exec $(kubectl get pod -l app=sleep -n legacy-client -o jsonpath={.items..metadata.name}) -c sleep -n legacy-client -- curl http://httpbin.mtls-service:8000/headers -s | grep X-Forwarded-Client-Cert

# The X-Forwarded-Client-Cert header isn't present so the traffic was sent and received in plaintext
```

> Note: The httpbin service in the mtls-service namespace accepted mTLS traffic from the sleep service in the mtls-client namespace and plaintext from the sleep service in the legacy-client namespace.

- Enforce STRICT mTLS mode across the service mesh
- In STRICT mode, services injected with the Istio proxy will not accept plaintext traffic and will mutually authenticate with their clients.
- You can enforce STRICT mTLS mode across the whole mesh or on a per-namespace basis by creating PeerAuthentication resources.

![img.png](.images/mTLS-strict-mode.png)

- Create a PeerAuthentication resource for the entire service mesh:

```bash
kubectl apply -n istio-system -f - <<EOF
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: mesh-wide-mtls
spec:
  mtls:
    mode: STRICT
EOF
```

> Note: The httpbin service in the mtls-service namespace now rejects the plaintext traffic it
> receives from the sleep client in the legacy-client namespace.

- Remove the mesh-wide mTLS PeerAuthentication resource by running this command in Cloud Shell:

```bash
kubectl delete pa mesh-wide-mtls -n istio-system
```

- Enforce STRICT mTLS mode on a single namespace
- In Cloud Shell, create a namespace for STRICT mTLS:

```bash
kubectl create ns strict-mtls-service

# Enable auto-injection of the Istio sidecar proxy on the new namespace:
# get the revision label
export DEPLOYMENT=$(kubectl get deployments -n istio-system | grep istiod)
export VERSION=asm-$(echo $DEPLOYMENT | cut -d'-' -f 3)-$(echo $DEPLOYMENT \
| cut -d'-' -f 4 | cut -d' ' -f 1)

# enable auto-injection on the namespaces
kubectl label namespace strict-mtls-service istio.io/rev=${VERSION} --overwrite

# Use Cloud Shell to deploy another instance of the httpbin service in the strict-mtls-service namespace:
kubectl apply -f \
https://raw.githubusercontent.com/istio/istio/release-1.6/samples/httpbin/httpbin.yaml \
-n strict-mtls-service

# Create a PeerAuthentication resource for the strict-mtls-service namespace:
kubectl apply -n strict-mtls-service -f - <<EOF
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: restricted-mtls
spec:
  mtls:
    mode: STRICT
EOF
```

- In the console, go to Navigation Menu > Anthos > Service Mesh.
- Under the Namespace dropdown, select the strict-mtls-service namespace and then click on the httpbin service located below.
- In the left side panel, click on Connected Services.
- Use your mouse to hover over the lock symbol in the Request Port column to see that only mTLS traffic has been received.
- Remove the strict-mtls-service peer authentication policy by running this command in Cloud Shell:

```bash
kubectl delete pa restricted-mtls -n strict-mtls-service
```

### Leverage RequestAuthentication and AuthorizationPolicy resources

- This task shows you how to set up and use RequestAuthentication and AuthorizationPolicy resources. Ultimately, you will allow requests that have an approved JWT, and deny requests that don't.
- A RequestAuthentication resource defines the request authentication methods that are supported by a workload. Requests with invalid authentication information will be rejected. Requests with no authentication credentials will be accepted but will not have any authenticated identity.
- Create a RequestAuthentication resource for the httpbin workload in the mtls-service namespace. This policy allows the workload to accept requests with a JWT issued by testing@secure.istio.io.
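The manifest itself is not captured above. A minimal sketch of such a RequestAuthentication follows; the resource name and jwksUri are illustrative, and only the issuer comes from the lab text:

```yaml
apiVersion: security.istio.io/v1beta1
kind: RequestAuthentication
metadata:
  name: httpbin-jwt            # illustrative name
  namespace: mtls-service
spec:
  selector:
    matchLabels:
      app: httpbin
  jwtRules:
  - issuer: "testing@secure.istio.io"
    # illustrative JWKS location; the lab's actual URI may differ
    jwksUri: "https://raw.githubusercontent.com/istio/istio/master/security/tools/jwt/samples/jwks.json"
```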
This policy allows the workload to accept requests with a JWT issued by testing@secure.istio.io. ```bash kubectl apply -f - < myfile.txt # Commit the file using the following Git commands: git config --global user.email "you@example.com" git config --global user.name "Your Name" git add myfile.txt git commit -m "First file using Cloud Source Repositories" myfile.txt # Once you've committed code to the local repository, add its contents to Cloud Source Repositories using the git push command: git push origin master ``` ## Browse files in the Google Cloud Source Repository ```bash gcloud source repos list ``` ## View a file in the Google Cloud repository - In the Console go to Navigation menu > Source Repositories. - Click REPO_DEMO > myfile.txt to view the file's contents in the source code browser. ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/ReadMe.md ================================================ # Managing deployments using Kubernetes Engine [https://www.cloudskillsboost.google](https://www.cloudskillsboost.google) [Select - Quest - DevOps Essentials](https://www.cloudskillsboost.google/paths) **High Level Objectives** - Learn about deployment object - Create a deployment - Rolling Update - Canary deployments - Blue/Green deployments **Skills** - gcp - kubernetes - canary deployments - blue green deployment ## Set the zone ```bash gcloud config set compute/zone us-east5-b ``` ## Get sample code for this lab ```bash gsutil -m cp -r gs://spls/gsp053/orchestrate-with-kubernetes . cd orchestrate-with-kubernetes/kubernetes # Create a cluster with 3 nodes (this will take a few minutes to complete): gcloud container clusters create bootcamp \ --machine-type e2-small \ --num-nodes 3 \ --scopes "https://www.googleapis.com/auth/projecthosting,storage-rw" ``` ## Learn about the deployment object ```bash kubectl explain deployment # We can also see all of the fields using the --recursive option: kubectl explain deployment --recursive # We can also see the documentation for a specific field: kubectl explain deployment.spec.replicas ``` ## Create a deployment ```bash # change to auth:1.0.0 kubectl create -f deployments/auth.yaml kubectl get deployments # When you create a Deployment in Kubernetes, it automatically creates a #ReplicaSet as well. The ReplicaSet is responsible for maintaining a #specified number of replicas of the Pods defined in the Deployment. # The Deployment object provides declarative updates for Pods and # ReplicaSets, and manages the creation and scaling of ReplicaSets # based on the user's desired state. When a Deployment is updated with a # new desired state, it creates a new ReplicaSet and gradually scales it # up while scaling down the old ReplicaSet, ensuring that the transition # between the old and new state is smooth and does not cause downtime. 
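# (illustrative) the Deployment owns a ReplicaSet, which in turn owns the pods;
# you can confirm the ownership chain from a pod's ownerReferences.
# This assumes the auth deployment created above is running:
kubectl get pods -l app=auth \
  -o jsonpath='{.items[0].metadata.ownerReferences[0].kind}/{.items[0].metadata.ownerReferences[0].name}{"\n"}'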
kubectl get replicasets
kubectl get pods

kubectl create -f services/auth.yaml

# Now, do the same thing to create and expose the hello deployment:
kubectl create -f deployments/hello.yaml
kubectl create -f services/hello.yaml

# And one more time to create and expose the frontend deployment:
kubectl create secret generic tls-certs --from-file tls/
kubectl create configmap nginx-frontend-conf --from-file=nginx/frontend.conf
kubectl create -f deployments/frontend.yaml
kubectl create -f services/frontend.yaml

kubectl get secret tls-certs -o yaml
```

- Interact with the frontend by grabbing its external IP and then curling to it:

```bash
kubectl get services frontend
curl -ks https://<EXTERNAL-IP>
# OR
curl -ks https://`kubectl get svc frontend -o=jsonpath="{.status.loadBalancer.ingress[0].ip}"`
```

- Scale a deployment

```bash
kubectl explain deployment.spec.replicas
kubectl scale deployment hello --replicas=5

# verify
kubectl get pods | grep hello- | wc -l

# scale down
kubectl scale deployment hello --replicas=3

# Again, verify that you have the correct number of Pods:
kubectl get pods | grep hello- | wc -l
```

## Rolling update

![img.png](.images/rolling-update.png)

```bash
kubectl edit deployment hello
# change image to kelseyhightower/hello:2.0.0

# See the new ReplicaSet that Kubernetes creates:
kubectl get replicaset

# You can also see a new entry in the rollout history:
kubectl rollout history deployment hello
```

- Pause a rolling update
  - If you detect problems with a running rollout, pause it to stop the update.

```bash
kubectl rollout pause deployment/hello

# Verify the current state of the rollout:
kubectl rollout status deployment/hello

# You can also verify this on the Pods directly:
kubectl get pods -o jsonpath --template='{range .items[*]}{.metadata.name}{"\t"}{"\t"}{.spec.containers[0].image}{"\n"}{end}'
```

- Resume a rolling update

```bash
kubectl rollout resume deployment/hello

# When the rollout is complete, you should see the following when running the status command:
kubectl rollout status deployment/hello
```

- Rollback an update

```bash
kubectl rollout undo deployment/hello

# Verify the roll back in the history:
kubectl rollout history deployment hello

# Finally, verify that all the Pods have rolled back to their previous versions:
kubectl get pods -o jsonpath --template='{range .items[*]}{.metadata.name}{"\t"}{"\t"}{.spec.containers[0].image}{"\n"}{end}'
```

## Canary deployments

![img.png](.images/canary-deployments.png)

```bash
kubectl create -f deployments/hello-canary.yaml

# On the hello service, the selector uses the app:hello selector which will match pods in both the prod
# deployment and the canary deployment. However, because the canary deployment has fewer pods,
# it will be visible to fewer users.

# You can verify the hello version being served by the request:
curl -ks https://`kubectl get svc frontend -o=jsonpath="{.status.loadBalancer.ingress[0].ip}"`/version
```

- Canary deployments in production - session affinity
  - In this lab, each request sent to the Nginx service had a chance to be served by the canary deployment. But what if you wanted to ensure that a user didn't get served by the canary deployment?
  - You can do this by creating a service with session affinity, so that the same user is always served from the same version. In the example below the service is the same as before, but a new sessionAffinity field has been added and set to ClientIP. All clients with the same IP address will have their requests sent to the same version of the hello application.
```yaml kind: Service apiVersion: v1 metadata: name: "hello" spec: sessionAffinity: ClientIP selector: app: "hello" ports: - protocol: "TCP" port: 80 targetPort: 80 ``` ## Blue-green deployments ![img.png](.images/blue-green-deployments.png) ```bash # A major downside of blue-green deployments is that you will need to have at least 2x the # resources in your cluster necessary to host your application kubectl apply -f services/hello-blue.yaml kubectl create -f deployments/hello-green.yaml # Once you have a green deployment and it has started up properly, verify that the current version of 1.0.0 is still being used: curl -ks https://`kubectl get svc frontend -o=jsonpath="{.status.loadBalancer.ingress[0].ip}"`/version # Now, update the service to point to the new version: kubectl apply -f services/hello-green.yaml # When the service is updated, the "green" deployment will be used immediately. You can now verify that the new version is always being used: curl -ks https://`kubectl get svc frontend -o=jsonpath="{.status.loadBalancer.ingress[0].ip}"`/version ``` - Blue-Green rollback ```bash # While the "blue" deployment is still running, just update the service back to the old version: kubectl apply -f services/hello-blue.yaml # Once you have updated the service, your rollback will have been successful. # Again, verify that the right version is now being used: curl -ks https://`kubectl get svc frontend -o=jsonpath="{.status.loadBalancer.ingress[0].ip}"`/version ``` ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/cleanup.sh ================================================ kubectl delete pods healthy-monolith monolith secure-monolith kubectl delete services monolith auth frontend hello kubectl delete deployments auth frontend hello hello-canary hello-green kubectl delete secrets tls-certs kubectl delete configmaps nginx-frontend-conf nginx-proxy-conf ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/deployments/auth.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: auth spec: replicas: 1 selector: matchLabels: app: auth template: metadata: labels: app: auth track: stable spec: containers: - name: auth image: "kelseyhightower/auth:2.0.0" ports: - name: http containerPort: 80 - name: health containerPort: 81 resources: limits: cpu: 0.2 memory: "10Mi" livenessProbe: httpGet: path: /healthz port: 81 scheme: HTTP initialDelaySeconds: 5 periodSeconds: 15 timeoutSeconds: 5 readinessProbe: httpGet: path: /readiness port: 81 scheme: HTTP initialDelaySeconds: 5 timeoutSeconds: 1 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/deployments/frontend.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: frontend spec: replicas: 1 selector: matchLabels: app: frontend template: metadata: labels: app: frontend track: stable spec: containers: - name: nginx image: "nginx:1.9.14" lifecycle: preStop: exec: command: ["/usr/sbin/nginx","-s","quit"] volumeMounts: - name: "nginx-frontend-conf" mountPath: "/etc/nginx/conf.d" - name: "tls-certs" mountPath: "/etc/tls" volumes: - name: "tls-certs" secret: secretName: "tls-certs" - name: "nginx-frontend-conf" configMap: name: 
"nginx-frontend-conf" items: - key: "frontend.conf" path: "frontend.conf" ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/deployments/hello-canary.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: hello-canary spec: replicas: 1 selector: matchLabels: app: hello template: metadata: labels: app: hello track: canary version: 2.0.0 spec: containers: - name: hello image: kelseyhightower/hello:2.0.0 ports: - name: http containerPort: 80 - name: health containerPort: 81 resources: limits: cpu: 0.2 memory: 10Mi livenessProbe: httpGet: path: /healthz port: 81 scheme: HTTP initialDelaySeconds: 5 periodSeconds: 15 timeoutSeconds: 5 readinessProbe: httpGet: path: /readiness port: 81 scheme: HTTP initialDelaySeconds: 5 timeoutSeconds: 1 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/deployments/hello-green.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: hello-green spec: replicas: 3 selector: matchLabels: app: hello template: metadata: labels: app: hello track: stable version: 2.0.0 spec: containers: - name: hello image: kelseyhightower/hello:2.0.0 ports: - name: http containerPort: 80 - name: health containerPort: 81 resources: limits: cpu: 0.2 memory: 10Mi livenessProbe: httpGet: path: /healthz port: 81 scheme: HTTP initialDelaySeconds: 5 periodSeconds: 15 timeoutSeconds: 5 readinessProbe: httpGet: path: /readiness port: 81 scheme: HTTP initialDelaySeconds: 5 timeoutSeconds: 1 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/deployments/hello.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: hello spec: replicas: 3 selector: matchLabels: app: hello template: metadata: labels: app: hello track: stable version: 1.0.0 spec: containers: - name: hello image: "kelseyhightower/hello:1.0.0" ports: - name: http containerPort: 80 - name: health containerPort: 81 resources: limits: cpu: 0.2 memory: "10Mi" livenessProbe: httpGet: path: /healthz port: 81 scheme: HTTP initialDelaySeconds: 5 periodSeconds: 15 timeoutSeconds: 5 readinessProbe: httpGet: path: /readiness port: 81 scheme: HTTP initialDelaySeconds: 5 timeoutSeconds: 1 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/nginx/frontend.conf ================================================ upstream hello { server hello.default.svc.cluster.local; } upstream auth { server auth.default.svc.cluster.local; } server { listen 443; ssl on; ssl_certificate /etc/tls/cert.pem; ssl_certificate_key /etc/tls/key.pem; location / { proxy_pass http://hello; } location /login { proxy_pass http://auth; } } ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/nginx/proxy.conf ================================================ server { listen 443; ssl on; ssl_certificate /etc/tls/cert.pem; ssl_certificate_key /etc/tls/key.pem; location / { proxy_pass http://127.0.0.1:80; } } ================================================ FILE: 
home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/pods/healthy-monolith.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: "healthy-monolith" labels: app: monolith spec: containers: - name: monolith image: kelseyhightower/monolith:1.0.0 ports: - name: http containerPort: 80 - name: health containerPort: 81 resources: limits: cpu: 0.2 memory: "10Mi" livenessProbe: httpGet: path: /healthz port: 81 scheme: HTTP initialDelaySeconds: 5 periodSeconds: 15 timeoutSeconds: 5 readinessProbe: httpGet: path: /readiness port: 81 scheme: HTTP initialDelaySeconds: 5 timeoutSeconds: 1 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/pods/monolith.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: monolith labels: app: monolith spec: containers: - name: monolith image: kelseyhightower/monolith:1.0.0 args: - "-http=0.0.0.0:80" - "-health=0.0.0.0:81" - "-secret=secret" ports: - name: http containerPort: 80 - name: health containerPort: 81 resources: limits: cpu: 0.2 memory: "10Mi" ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/pods/secure-monolith.yaml ================================================ apiVersion: v1 kind: Pod metadata: name: "secure-monolith" labels: app: monolith spec: containers: - name: nginx image: "nginx:1.9.14" lifecycle: preStop: exec: command: ["/usr/sbin/nginx","-s","quit"] volumeMounts: - name: "nginx-proxy-conf" mountPath: "/etc/nginx/conf.d" - name: "tls-certs" mountPath: "/etc/tls" - name: monolith image: "kelseyhightower/monolith:1.0.0" ports: - name: http containerPort: 80 - name: health containerPort: 81 resources: limits: cpu: 0.2 memory: "10Mi" livenessProbe: httpGet: path: /healthz port: 81 scheme: HTTP initialDelaySeconds: 5 periodSeconds: 15 timeoutSeconds: 5 readinessProbe: httpGet: path: /readiness port: 81 scheme: HTTP initialDelaySeconds: 5 timeoutSeconds: 1 volumes: - name: "tls-certs" secret: secretName: "tls-certs" - name: "nginx-proxy-conf" configMap: name: "nginx-proxy-conf" items: - key: "proxy.conf" path: "proxy.conf" ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/services/auth.yaml ================================================ kind: Service apiVersion: v1 metadata: name: "auth" spec: selector: app: "auth" ports: - protocol: "TCP" port: 80 targetPort: 80 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/services/frontend.yaml ================================================ kind: Service apiVersion: v1 metadata: name: "frontend" spec: selector: app: "frontend" ports: - protocol: "TCP" port: 443 targetPort: 443 type: LoadBalancer ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/services/hello-blue.yaml ================================================ kind: Service apiVersion: v1 metadata: name: "hello" spec: selector: app: "hello" version: 1.0.0 ports: - protocol: "TCP" port: 80 targetPort: 80 ================================================ FILE: 
home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/services/hello-green.yaml ================================================ kind: Service apiVersion: v1 metadata: name: hello spec: selector: app: hello version: 2.0.0 ports: - protocol: TCP port: 80 targetPort: 80 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/services/hello.yaml ================================================ kind: Service apiVersion: v1 metadata: name: "hello" spec: selector: app: "hello" ports: - protocol: "TCP" port: 80 targetPort: 80 ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_028_managing_deployments_using_kubernetes_engine/services/monolith.yaml ================================================ kind: Service apiVersion: v1 metadata: name: "monolith" spec: selector: app: "monolith" secure: "enabled" ports: - protocol: "TCP" port: 443 targetPort: 443 nodePort: 31000 type: NodePort ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_029_trouble_shooting_workloads_on_gke_for_sre/ReadMe.md ================================================ # Troubleshooting Workloads on GKE for Site Reliability Engineers [https://www.cloudskillsboost.google](https://www.cloudskillsboost.google) [Select - Quest - DevOps Essentials](https://www.cloudskillsboost.google/paths) **High Level Objectives** - Access operational data through GKE Dashboards - Proactive monitoring with logs-based metrics - Create a SLO - Define an alert on the SLO **Skills** - gcp - log based metrics - sre - gke - monitoring - slo - alerting - troubleshooting ## Navigating Google Kubernetes Engine (GKE) resource pages - In Cloud Console, from the Navigation menu go to Kubernetes Engine > Clusters. - Confirm that you see the following Kubernetes cluster available: cloud-ops-sandbox. Validate that each cluster has a green checkbox next to it to indicate it is up and running. - Click on the cloud-ops-sandbox link under the Name column to navigate to the cluster's Details tab. ## Accessing operational data through GKE Dashboards - Navigate to Navigation menu > Kubernetes Engine > Services & Ingress. Click on the Endpoint (an IP address) for the frontend-external service. - Click on any product displayed on the landing page to reproduce the error reported. - Navigate to Cloud Monitoring from Cloud Console, from the Navigation Menu go to Monitoring > Dashboards. - When the Dashboards landing page opens, click GKE. - Click on the Add Filter button at the top of the GKE Dashboard page. - From the available filters, select Workloads > recommendationservice. - You will re-deploy the recommendationservice microservice to ensure that the error is no longer present. ```bash git clone --depth 1 --branch csb_1220 https://github.com/GoogleCloudPlatform/cloud-ops-sandbox.git cd cloud-ops-sandbox/sre-recipes # Connect to cluster ./sandboxctl sre-recipes restore "recipe3" # Check the service back again ``` ## Proactive monitoring with logs-based metrics - From Cloud Console, click on the Navigation Menu > Logging > Logs Explorer. - In the Query results section click on +Create metric. This will open a new tab to create a logs based metric. 
- Enter the following options on the Create logs metric page:
  - Metric Type: Counter
  - Log metric name: Error_Rate_SLI
  - Filter Selection: (copy and paste the filter below)

```
resource.labels.cluster_name="cloud-ops-sandbox" AND
resource.labels.namespace_name="default" AND
resource.type="k8s_container" AND
labels.k8s-pod/app="recommendationservice" AND
severity>=ERROR
```

## Creating an SLO

- Navigate to Navigation menu > Monitoring > Services. The resulting page displays a list of all services deployed to GKE for the application workload.
- Choose a metric: Other
- Request-based or windows-based: Request Based
- Set the Performance Metric to custom.googleapis.com/opencensus/grpc.io/client/roundtrip_latency. This shows the roundtrip latency of requests made by the client to the recommendation service.
- Set the performance threshold to less than 100 ms (the range -∞ to 100 ms).
- Period type: Calendar
- Period length: Calendar month
- Performance Goal: 99%

## Define an alert on the SLO

- Navigate to Navigation menu > Monitoring > Services.

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_030_minimal_nodejs_app_dockerize_google_artifact_registry/ReadMe.md
================================================

# Minimal NodeJS App - Dockerize - Google Artifact Registry

[https://www.cloudskillsboost.google](https://www.cloudskillsboost.google)

[Select - Lab - Introduction to Docker](https://www.cloudskillsboost.google/paths)

**High Level Objectives**

- Build a minimal NodeJS app
- Dockerize the app
- Run locally and debug using docker
- Push to Google Artifact Registry

**Skills**

- gcp
- docker
- nodejs
- artifact registry

## Build

```bash
mkdir test && cd test

cat > Dockerfile <<EOF
# Use an official Node runtime as the parent image
FROM node:lts
# Set the working directory in the container to /app
WORKDIR /app
# Copy the current directory contents into the container at /app
ADD . /app
# Make the container's port 80 available to the outside world
EXPOSE 80
# Run app.js using node when the container launches
CMD ["node", "app.js"]
EOF

cat > app.js <<EOF
const http = require('http');
const hostname = '0.0.0.0';
const port = 80;
const server = http.createServer((req, res) => {
  res.statusCode = 200;
  res.setHeader('Content-Type', 'text/plain');
  res.end('Hello World\n');
});
server.listen(port, hostname, () => {
  console.log('Server running at http://%s:%s/', hostname, port);
});
process.on('SIGINT', function() {
  console.log('Caught interrupt signal and will exit');
  process.exit();
});
EOF
```

- Docker build

```bash
docker build -t node-app:0.1 .
```

- Now, run the following command to look at the images you built:

```bash
docker images
```

## Run

```bash
docker run -p 4000:80 --name my-app node-app:0.1
```

- Open another terminal (in Cloud Shell, click the + icon), and test the server:

```bash
curl http://localhost:4000
```

- Close the initial terminal and then run the following command to stop and remove the container:

```bash
docker stop my-app && docker rm my-app
```

- Now run the following command to start the container in the background:

```bash
docker run -p 4000:80 --name my-app -d node-app:0.1
docker ps
```

- Notice the container is running in the output of docker ps. You can look at the logs by executing docker logs [container_id].

```bash
docker logs [container_id]
```

- In your Cloud Shell, open the test directory you created earlier in the lab:

```bash
cd test
```

- Edit app.js with a text editor of your choice (for example nano or vim) and replace "Hello World" with another string:

```bash
....
const server = http.createServer((req, res) => {
  res.statusCode = 200;
  res.setHeader('Content-Type', 'text/plain');
  res.end('Welcome to Cloud\n');
});
....
```

- Build this new image and tag it with 0.2:

```bash
docker build -t node-app:0.2 .
```

- Run another container with the new image version. Notice how we map the host's port 8080 instead of 80.
You can't use host port 4000 because it's already in use. ```bash docker run -p 8080:80 --name my-app-2 -d node-app:0.2 docker ps ``` - Test the containers: ```bash curl http://localhost:8080 ## And now test the first container you made: curl http://localhost:4000 ``` ## Debug - You can look at the logs of a container using docker logs [container_id]. If you want to follow the log's output as the container is running, use the -f option. ```bash docker logs -f [container_id] ``` - You can use docker exec to do this. ```bash docker exec -it [container_id] bash ## look at dir ls ``` - You can examine a container's metadata in Docker by using Docker inspect: ```bash docker inspect [container_id] ``` - Use --format to inspect specific fields from the returned JSON. For example: ```bash docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' [container_id] ``` ## Publish - Create the target Docker repository - From the Navigation Menu, under CI/CD navigate to Artifact Registry > Repositories. - Specify my-repository as the repository name. - Configure authentication ```bash # To set up authentication to Docker repositories in the region us-central1, run the following command in Cloud Shell: gcloud auth configure-docker us-central1-docker.pkg.dev ``` - Push the container to Artifact Registry ```bash export PROJECT_ID=$(gcloud config get-value project) cd ~/test # Run the command to tag node-app:0.2. docker build -t us-central1-docker.pkg.dev/$PROJECT_ID/my-repository/node-app:0.2 . docker images # Push this image to Artifact Registry. docker push us-central1-docker.pkg.dev/$PROJECT_ID/my-repository/node-app:0.2 ``` - Verify the image was pushed in the Artifact Registry console. - Test the image ```bash # Stop and remove all containers: docker stop $(docker ps -q) docker rm $(docker ps -aq) ``` - Run the following command to remove all of the Docker images. ```bash docker rmi us-central1-docker.pkg.dev/$PROJECT_ID/my-repository/node-app:0.2 docker rmi node:lts docker rmi -f $(docker images -aq) # remove remaining images docker images ``` - Pull the image and run it. ```bash docker pull us-central1-docker.pkg.dev/$PROJECT_ID/my-repository/node-app:0.2 docker run -p 4000:80 -d us-central1-docker.pkg.dev/$PROJECT_ID/my-repository/node-app:0.2 curl http://localhost:4000 ``` ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_030_minimal_nodejs_app_dockerize_google_artifact_registry/test/Dockerfile ================================================ # Use an official Node runtime as the parent image FROM node:lts # Set the working directory in the container to /app WORKDIR /app # Copy the current directory contents into the container at /app ADD . /app # Make the container's port 80 available to the outside world EXPOSE 80 # Run app.js using node when the container launches CMD ["node", "app.js"] ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_030_minimal_nodejs_app_dockerize_google_artifact_registry/test/app.js ================================================ /* This is a Node.js server that listens for HTTP requests on port 80 (the standard HTTP port) and responds with a "Hello World" message in plain text format */ // imports the Node.js http module, which provides functionality for creating an HTTP server. 
const http = require('http'); // defines two constants, hostname and port, which specify the address and port number that the server will listen on const hostname = '0.0.0.0'; const port = 80; // creates an HTTP server using the http.createServer() method, which takes a callback // function as its argument. // This callback function is called whenever a client makes a request to the server. const server = http.createServer((req, res) => { // The callback function sets the HTTP response status code to 200 (OK), res.statusCode = 200; //sets the Content-Type header to text/plain, res.setHeader('Content-Type', 'text/plain'); // and sends the "Hello World" message as the response body. res.end('Hello World\n'); }); // The server.listen() method is called to start the server listening on the // specified hostname and port number. It also takes a callback function that // is called once the server starts listening. // This callback function just logs a message to the console to indicate that the server is running. server.listen(port, hostname, () => { console.log('Server running at http://%s:%s/', hostname, port); }); // Finally, a SIGINT event listener is added to the process object. // This listener is triggered when the user presses Ctrl-C to stop the server. // When the listener is triggered, it logs a message to the console and exits the process. process.on('SIGINT', function() { console.log('Caught interrupt signal and will exit'); process.exit(); }); ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_031_hello_node_kubernetes__node/Dockerfile ================================================ # Use Node.js v6.9.2 as base image FROM node:6.9.2 # Expose port 8080 for incoming traffic EXPOSE 8080 # Copy the server.js file from the current directory to the image COPY server.js . # Set the default command to run the server.js file with Node.js CMD node server.js ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_031_hello_node_kubernetes__node/ReadMe.md ================================================ # Hello Node Kubernetes [https://www.cloudskillsboost.google](https://www.cloudskillsboost.google) [Select - Lab - Hello Node Kubernetes](https://www.cloudskillsboost.google/paths) **High Level Objectives** - Create a Node.js server. - Create a Docker container image. - Create a container cluster. - Create a Kubernetes pod. - Scale up your services. **Skills** - gcp - kubernetes - docker - nodejs - pods - deployments - services - scaling - load balancing how the pieces fit together with one another ![img.png](.images/moving-parts.png) ## Create Nodejs application ```bash touch server.js # copy the file contents node server.js # run the server # in another terminal session curl localhost:8080 ``` ## Create Dockerfile ```Dockefile # Use Node.js v6.9.2 as base image FROM node:6.9.2 # Expose port 8080 for incoming traffic EXPOSE 8080 # Copy the server.js file from the current directory to the image COPY server.js . # Set the default command to run the server.js file with Node.js CMD node server.js ``` - Build and Run ```bash export PROJECT_ID=$(gcloud config get-value project -q) # Build the image with the following, replacing PROJECT_ID with your Project ID docker build -t gcr.io/$PROJECT_ID/hello-node:v1 . 
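# (optional check) Container Registry image names follow the pattern
# gcr.io/<project-id>/<image>:<tag>; confirm the tag exists before pushing:
docker images | grep hello-node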
# Run the following command, replacing PROJECT_ID with your Project ID
docker run -d -p 8080:8080 gcr.io/$PROJECT_ID/hello-node:v1

# Test
curl http://localhost:8080

# Check running containers
docker ps

# Stop the container
docker stop [container_id]

# Push
# Run this command, replacing PROJECT_ID with your Project ID
gcloud auth configure-docker
docker push gcr.io/$PROJECT_ID/hello-node:v1
```

## Create your cluster

```bash
## Replace PROJECT_ID
gcloud config set project $PROJECT_ID

## Create a cluster with two n1-standard-1 nodes (this will take a few minutes to complete):
gcloud container clusters create hello-world \
  --num-nodes 2 \
  --machine-type n1-standard-1 \
  --zone us-central1-a

# Check the cluster in the Cloud Console
```

## Create your pod

```bash
# Create a deployment with the kubectl create deployment command
## Replace project ID
kubectl create deployment hello-node \
  --image=gcr.io/$PROJECT_ID/hello-node:v1

# Check deployment
kubectl get deployments

# pods
kubectl get pods
```

## Allow external traffic

```bash
# From Cloud Shell you can expose the pod to the public internet with the kubectl expose
# command combined with the --type="LoadBalancer" flag.
# This flag is required for the creation of an externally accessible IP
kubectl expose deployment hello-node --type="LoadBalancer" --port=8080

# Check services
kubectl get services

# Test application
curl http://[EXTERNAL_IP]:8080
```

## Scale up your service

```bash
# Scale up your service to four replicas
kubectl scale deployment hello-node --replicas=4

# Check deployment
kubectl get deployment

# Check pods
kubectl get pods
```

State of our cluster

![img.png](.images/state-of-cluster.png)

## Roll out an upgrade to your service

- Change the server.js file to return a different message

```bash
response.end("Hello Kubernetes World!");
```

- Build and push the new image

```bash
docker build -t gcr.io/$PROJECT_ID/hello-node:v2 .
docker push gcr.io/$PROJECT_ID/hello-node:v2
```

- Edit the deployment to use the new image

```bash
kubectl set image deployment/hello-node hello-node=gcr.io/$PROJECT_ID/hello-node:v2
# OR
kubectl edit deployment hello-node
# change the image to gcr.io/PROJECT_ID/hello-node:v2
```

- Check the deployment

```bash
kubectl get deployments
```

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_031_hello_node_kubernetes__node/server.js
================================================

/*
This is basic Node.js code that creates an HTTP server and listens for incoming requests on port 8080.
When a request is received, it responds with an HTTP 200 status code and the message "Hello World!".
*/

// We import the Node.js http module and define a function called handleRequest,
// which takes two arguments, request and response. When a request is received by the server,
// this function is called to handle the request.
var http = require('http');

// Inside the handleRequest function, the response is set to return an HTTP 200
// status code using the writeHead method of the response object, and the response
// body is set to "Hello World!" using the end method of the response object.
var handleRequest = function(request, response) {
  response.writeHead(200);
  response.end("Hello World!");
}

// The http.createServer method is used to create an HTTP server and
// assign the handleRequest function as the request handler. Finally,
// the server is started by calling the listen method of the server object
// and specifying the port to listen on.
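// Note: handleRequest ignores the request URL and method, so every path
// returns the same "Hello World!" body with a 200 status.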
var www = http.createServer(handleRequest); www.listen(8080); ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_032_setting_up_jenkins_on_kubernetes_engine/ReadMe.md ================================================ **High Level Objectives** - Prepare the environment - Configure Helm - Configure and install Jenkins - Connect to Jenkins **Skills** - gcp - kubernetes - docker - nodejs - pods - jenkins - helm - deployments - services ## Prepare the environment ```bash # Set the default Compute Engine zone to us-central1-c: gcloud config set compute/zone us-central1-c # Clone the sample code: git clone https://github.com/GoogleCloudPlatform/continuous-deployment-on-kubernetes.git # Navigate to the sample code directory: cd continuous-deployment-on-kubernetes ``` - Creating a Kubernetes cluster ```bash gcloud container clusters create jenkins-cd \ --num-nodes 2 \ --scopes "https://www.googleapis.com/auth/projecthosting,cloud-platform" ``` - confirm cluster is running ```bash gcloud container clusters list ``` - Get the credentials for your cluster. Kubernetes Engine uses these credentials to access your newly provisioned cluster. ```bash gcloud container clusters get-credentials jenkins-cd # Verify that you can access your cluster by running the following command: kubectl cluster-info ``` ## Configure Helm - Add Helm's jenkins chart repository: ```bash helm repo add jenkins https://charts.jenkins.io # Update the repo to ensure you get the latest list of charts: helm repo update ``` ## Configure and install Jenkins ```bash # Use the Helm CLI to deploy the chart with your configuration set: helm upgrade --install -f jenkins/values.yaml myjenkins jenkins/jenkins ``` - Once that command completes ensure the Jenkins pod goes to the Running state and the container is in the READY state. ```bash kubectl get pods ``` - Run the following command to setup port forwarding to the Jenkins UI from the Cloud Shell: ```bash echo http://127.0.0.1:8080 kubectl --namespace default port-forward svc/myjenkins 8080:8080 >> /dev/null & ``` - Now, check that the Jenkins Service was created properly: ```bash kubectl get svc ``` ## Connect to Jenkins - The Jenkins chart will automatically create an admin password for you. 
To retrieve it, run: ```bash kubectl exec --namespace default -it svc/myjenkins -c jenkins -- /bin/cat /run/secrets/additional/chart-admin-password && echo ``` ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_032_setting_up_jenkins_on_kubernetes_engine/values.yaml ================================================ controller: installPlugins: - kubernetes:latest - workflow-job:latest - workflow-aggregator:latest - credentials-binding:latest - git:latest - google-oauth-plugin:latest - google-source-plugin:latest - google-kubernetes-engine:latest - google-storage-plugin:latest resources: requests: cpu: "50m" memory: "1024Mi" limits: cpu: "1" memory: "3500Mi" javaOpts: "-Xms3500m -Xmx3500m" serviceType: ClusterIP agent: resources: requests: cpu: "500m" memory: "256Mi" limits: cpu: "1" memory: "512Mi" persistence: size: 100Gi serviceAccount: name: cd-jenkins ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_033_continuous_delivery_with_jenkins_in_kubernetes_engine/Jenkinsfile ================================================ pipeline { environment { PROJECT = "REPLACE_WITH_YOUR_PROJECT_ID" APP_NAME = "gceme" FE_SVC_NAME = "${APP_NAME}-frontend" CLUSTER = "jenkins-cd" CLUSTER_ZONE = "us-east1-d" IMAGE_TAG = "gcr.io/${PROJECT}/${APP_NAME}:${env.BRANCH_NAME}.${env.BUILD_NUMBER}" JENKINS_CRED = "${PROJECT}" } agent { kubernetes { label 'sample-app' defaultContainer 'jnlp' yaml """ apiVersion: v1 kind: Pod metadata: labels: component: ci spec: # Use service account that can deploy to all namespaces serviceAccountName: cd-jenkins containers: - name: golang image: golang:1.10 command: - cat tty: true - name: gcloud image: gcr.io/cloud-builders/gcloud command: - cat tty: true - name: kubectl image: gcr.io/cloud-builders/kubectl command: - cat tty: true """ } } stages { stage('Test') { steps { container('golang') { sh """ ln -s `pwd` /go/src/sample-app cd /go/src/sample-app go test """ } } } stage('Build and push image with Container Builder') { steps { container('gcloud') { sh "PYTHONUNBUFFERED=1 gcloud builds submit -t ${IMAGE_TAG} ." 
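// Cloud Build does the docker build and push remotely and tags the image as
// ${IMAGE_TAG}, so the Jenkins agent needs no local Docker daemon.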
} } } stage('Deploy Canary') { // Canary branch when { branch 'canary' } steps { container('kubectl') { // Change deployed image in canary to the one we just built sh("sed -i.bak 's#corelab/gceme:1.0.0#${IMAGE_TAG}#' ./k8s/canary/*.yaml") step([$class: 'KubernetesEngineBuilder', namespace:'production', projectId: env.PROJECT, clusterName: env.CLUSTER, zone: env.CLUSTER_ZONE, manifestPattern: 'k8s/services', credentialsId: env.JENKINS_CRED, verifyDeployments: false]) step([$class: 'KubernetesEngineBuilder', namespace:'production', projectId: env.PROJECT, clusterName: env.CLUSTER, zone: env.CLUSTER_ZONE, manifestPattern: 'k8s/canary', credentialsId: env.JENKINS_CRED, verifyDeployments: true]) sh("echo http://`kubectl --namespace=production get service/${FE_SVC_NAME} -o jsonpath='{.status.loadBalancer.ingress[0].ip}'` > ${FE_SVC_NAME}") } } } stage('Deploy Production') { // Production branch when { branch 'master' } steps{ container('kubectl') { // Change deployed image in canary to the one we just built sh("sed -i.bak 's#corelab/gceme:1.0.0#${IMAGE_TAG}#' ./k8s/production/*.yaml") step([$class: 'KubernetesEngineBuilder', namespace:'production', projectId: env.PROJECT, clusterName: env.CLUSTER, zone: env.CLUSTER_ZONE, manifestPattern: 'k8s/services', credentialsId: env.JENKINS_CRED, verifyDeployments: false]) step([$class: 'KubernetesEngineBuilder', namespace:'production', projectId: env.PROJECT, clusterName: env.CLUSTER, zone: env.CLUSTER_ZONE, manifestPattern: 'k8s/production', credentialsId: env.JENKINS_CRED, verifyDeployments: true]) sh("echo http://`kubectl --namespace=production get service/${FE_SVC_NAME} -o jsonpath='{.status.loadBalancer.ingress[0].ip}'` > ${FE_SVC_NAME}") } } } stage('Deploy Dev') { // Developer Branches when { not { branch 'master' } not { branch 'canary' } } steps { container('kubectl') { // Create namespace if it doesn't exist sh("kubectl get ns ${env.BRANCH_NAME} || kubectl create ns ${env.BRANCH_NAME}") // Don't use public load balancing for development branches sh("sed -i.bak 's#LoadBalancer#ClusterIP#' ./k8s/services/frontend.yaml") sh("sed -i.bak 's#corelab/gceme:1.0.0#${IMAGE_TAG}#' ./k8s/dev/*.yaml") step([$class: 'KubernetesEngineBuilder', namespace: "${env.BRANCH_NAME}", projectId: env.PROJECT, clusterName: env.CLUSTER, zone: env.CLUSTER_ZONE, manifestPattern: 'k8s/services', credentialsId: env.JENKINS_CRED, verifyDeployments: false]) step([$class: 'KubernetesEngineBuilder', namespace: "${env.BRANCH_NAME}", projectId: env.PROJECT, clusterName: env.CLUSTER, zone: env.CLUSTER_ZONE, manifestPattern: 'k8s/dev', credentialsId: env.JENKINS_CRED, verifyDeployments: true]) echo 'To access your environment run `kubectl proxy`' echo "Then access your service via http://localhost:8001/api/v1/proxy/namespaces/${env.BRANCH_NAME}/services/${FE_SVC_NAME}:80/" } } } } } ================================================ FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_033_continuous_delivery_with_jenkins_in_kubernetes_engine/ReadMe.md ================================================ # Continuous Delivery with Jenkins in Kubernetes Engine **High Level Objectives** - **Skills** - gcp ## Download the source code - run the following command to set your zone us-east1-c: ```bash gcloud config set compute/zone us-east1-c # Then copy the lab's sample code: gsutil cp gs://spls/gsp051/continuous-deployment-on-kubernetes.zip . 
unzip continuous-deployment-on-kubernetes.zip
cd continuous-deployment-on-kubernetes
```

## Provisioning Jenkins

```bash
# Provision a Kubernetes cluster:
gcloud container clusters create jenkins-cd \
  --num-nodes 2 \
  --machine-type n1-standard-2 \
  --scopes "https://www.googleapis.com/auth/source.read_write,cloud-platform"

# Confirm that your cluster is running:
gcloud container clusters list

# Now, get the credentials for your cluster:
gcloud container clusters get-credentials jenkins-cd

# Confirm that you can connect to it by running the following command:
kubectl cluster-info
```

## Setup Helm

```bash
# Add the Jenkins chart repo:
helm repo add jenkins https://charts.jenkins.io

# Ensure the repo is up to date:
helm repo update
```

## Configure and Install Jenkins

A values file is used to automatically configure your Kubernetes Cloud and add the following necessary plugins:

- Kubernetes:latest
- Workflow-multibranch:latest
- Git:latest
- Configuration-as-code:latest
- Google-oauth-plugin:latest
- Google-source-plugin:latest
- Google-storage-plugin:latest

```bash
# Use the Helm CLI to deploy the chart with your configuration settings:
helm install cd jenkins/jenkins -f jenkins/values.yaml --wait

# Check pods
kubectl get pods

# Configure the Jenkins service account to be able to deploy to the cluster:
kubectl create clusterrolebinding jenkins-deploy --clusterrole=cluster-admin --serviceaccount=default:cd-jenkins

# Run the following command to set up port forwarding to the Jenkins UI from the Cloud Shell:
export POD_NAME=$(kubectl get pods --namespace default -l "app.kubernetes.io/component=jenkins-master" -l "app.kubernetes.io/instance=cd" -o jsonpath="{.items[0].metadata.name}")
kubectl port-forward $POD_NAME 8080:8080 >> /dev/null &

# Check that the Jenkins Service was created properly:
kubectl get svc
```

## Connect to Jenkins

```bash
# The Jenkins chart will automatically create an admin password for you. To retrieve it, run:
printf $(kubectl get secret cd-jenkins -o jsonpath="{.data.jenkins-admin-password}" | base64 --decode);echo

# If asked, log in with username admin and your auto-generated password.
```

## Understanding the Application

## Deploying the Application

You will deploy the application into two different environments:

- Production: The live site that your users access.
- Canary: A smaller-capacity site that receives only a percentage of your user traffic. Use this environment to validate your software with live traffic before it's released to all of your users.
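Because the canary and production frontends sit behind the same Kubernetes service selector, the share of traffic reaching the canary is roughly its fraction of the total pod count (1 of 5 frontends is about 20%). Once the canary has deployed version 2.0.0 later in this task, you can estimate the observed split with a quick sketch like the one below (it assumes `FRONTEND_SERVICE_IP` has been exported as shown further down):

```bash
# sample /version repeatedly and count how many responses came from the canary
total=50; canary=0
for i in $(seq $total); do
  v=$(curl -s http://$FRONTEND_SERVICE_IP/version)
  [ "$v" = "2.0.0" ] && canary=$((canary + 1))
done
echo "canary served $canary out of $total requests"
```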
```bash
cd sample-app

## Create the Kubernetes namespace to logically isolate the deployment:
kubectl create ns production

# Create the production and canary deployments, and the services, using the kubectl apply commands:
kubectl apply -f k8s/production -n production
kubectl apply -f k8s/canary -n production
kubectl apply -f k8s/services -n production

# Scale up the production environment frontends by running the following command
kubectl scale deployment gceme-frontend-production -n production --replicas 4

# Now confirm that you have 5 pods running for the frontend, 4 for production traffic and 1 for canary releases
kubectl get pods -n production -l app=gceme -l role=frontend

# Also confirm that you have 2 pods for the backend, 1 for production and 1 for canary:
kubectl get pods -n production -l app=gceme -l role=backend

# Retrieve the external IP for the production services:
kubectl get service gceme-frontend -n production

# Now, store the frontend service load balancer IP in an environment variable for use later:
export FRONTEND_SERVICE_IP=$(kubectl get -o jsonpath="{.status.loadBalancer.ingress[0].ip}" --namespace=production services gceme-frontend)

# Confirm that both services are working by opening the frontend external IP address in your browser.
# Check the version output of the service by running the following command (it should read 1.0.0):
curl http://$FRONTEND_SERVICE_IP/version
```

## Creating the Jenkins Pipeline

- Creating the Jenkins Pipeline

```bash
gcloud source repos create default

# Initialize the sample-app directory as its own Git repository:
git init
git config credential.helper gcloud.sh

# Run the following command
git remote add origin https://source.developers.google.com/p/$DEVSHELL_PROJECT_ID/r/default

# Set the username and email address for your Git commits.
# Replace [EMAIL_ADDRESS] with your Git email address and [USERNAME] with your Git username:
git config --global user.email "[EMAIL_ADDRESS]"
git config --global user.name "[USERNAME]"
git add .
git commit -m "Initial commit"
git push origin master
```

- Adding your service account credentials
  - In the Jenkins user interface, click Manage Jenkins in the left navigation, then click Manage Credentials, then click Global credentials (unrestricted).
- Configure Jenkins Cloud for Kubernetes
- Creating the Jenkins job

```bash
# https://source.developers.google.com/p/[PROJECT_ID]/r/default
```

## Creating the development environment

```bash
# Create a development branch and push it to the Git server:
git checkout -b new-feature
```

- Modifying the pipeline definition
  - Use the Jenkinsfile
  - Add your PROJECT_ID to the REPLACE_WITH_YOUR_PROJECT_ID value in the Jenkinsfile.
  - Set CLUSTER_ZONE to us-east1-c.
  - Change the two instances of
`<div class="card blue">` with `<div class="card orange">`
in the html.go file.
  - In main.go, change the version to 2.0.0.

## Kick off Deployment

- Commit and push your changes

```bash
git add Jenkinsfile html.go main.go
git commit -m "Version 2.0.0"
git push origin new-feature
```

- Once that's all taken care of, start the proxy in the background:

```bash
kubectl proxy &

# If it stalls, press Ctrl + C to exit out. Verify that your application is accessible by
# sending a request to localhost and letting kubectl proxy forward it to your service
curl \
  http://localhost:8001/api/v1/namespaces/new-feature/services/gceme-frontend:80/proxy/version

# You should see it respond with 2.0.0, which is the version that is now running.
```

## Deploying a canary release

- Create a canary branch and push it to the Git server:

```bash
git checkout -b canary
git push origin canary
```

- In Jenkins, you should see the canary pipeline has kicked off. Once complete, you can check the service URL to ensure that some of the traffic is being served by your new version. You should see about 1 in 5 requests (in no particular order) returning version 2.0.0.

```bash
export FRONTEND_SERVICE_IP=$(kubectl get -o \
  jsonpath="{.status.loadBalancer.ingress[0].ip}" --namespace=production services gceme-frontend)
while true; do curl http://$FRONTEND_SERVICE_IP/version; sleep 1; done

# Output
1.0.0
1.0.0
1.0.0
2.0.0
1.0.0
1.0.0
1.0.0
```

## Deploying to production

- Merge the canary branch into master and push it to the Git server:

```bash
git checkout master
git merge canary
git push origin master

# Trigger master job
export FRONTEND_SERVICE_IP=$(kubectl get -o \
  jsonpath="{.status.loadBalancer.ingress[0].ip}" --namespace=production services gceme-frontend)

# All should be 2.0.0
while true; do curl http://$FRONTEND_SERVICE_IP/version; sleep 1; done

# Output
2.0.0
2.0.0
2.0.0
2.0.0
2.0.0
2.0.0

# kubectl get service gceme-frontend -n production
```

================================================
FILE: home/cloud_providers/gcp/taskset_gcp_cloud_providers/task_033_continuous_delivery_with_jenkins_in_kubernetes_engine/values.yaml
================================================

controller:
  installPlugins:
    - kubernetes:latest
    - workflow-job:latest
    - workflow-aggregator:latest
    - credentials-binding:latest
    - git:latest
    - google-oauth-plugin:latest
    - google-source-plugin:latest
    - google-kubernetes-engine:latest
    - google-storage-plugin:latest
  resources:
    requests:
      cpu: "50m"
      memory: "1024Mi"
    limits:
      cpu: "1"
      memory: "3500Mi"
  javaOpts: "-Xms3500m -Xmx3500m"
  serviceType: ClusterIP
agent:
  resources:
    requests:
      cpu: "500m"
      memory: "256Mi"
    limits:
      cpu: "1"
      memory: "512Mi"
persistence:
  size: 100Gi
serviceAccount:
  name: cd-jenkins

================================================
FILE: home/cloud_providers/oci/ReadMe.md
================================================

# taskset_oci_cloud_providers

> [Auto](https://github.com/codeaprendiz/learn_fullstack/blob/main/home/php/intermediate/taskset_intermediate_php/task_004_createGlobalMarkdownTable/generate-readme.php) generated ReadMe.
Number of tasks: 3

| Task | Description |
|----------|--------------------------------------------------------------------------------------------------|
| task_000 | [task_000_set_up_oci_cli](taskset_oci_cloud_providers/task_000_set_up_oci_cli) |
| task_001 | [task_001_oci_cli_commands](taskset_oci_cloud_providers/task_001_oci_cli_commands) |
| task_002 | [task_002_create_k8s_quick_create](taskset_oci_cloud_providers/task_002_create_k8s_quick_create) |

================================================
FILE: home/cloud_providers/oci/taskset_oci_cloud_providers/task_000_set_up_oci_cli/ReadMe.md
================================================

# Set up OCI CLI

[docs.oracle.com » Developer Resources » Required Keys and OCIDs](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/apisigningkey.htm#two)

[docs.oracle.com » Developer Resources » Working with CLI » Configuring the CLI](https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/cliconfigure.htm)

- Login to Oracle Cloud
- Go to your profile and download the API Keys
- Download the private and public key pair
- Create the OCI directory

```bash
╰─ mkdir -p ~/.oci/
╰─ touch ~/.oci/config
```

- Move the downloaded keys to the OCI directory

```bash
╰─ ls
config  oci-private.pem  oci-public.pem
```

- Set up your config file and repair the config file permissions if needed

```bash
╰─ oci setup repair-file-permissions --file ~/.oci/oci-private.pem
```

```bash
╰─ cat config
[DEFAULT]
user=<user-ocid>
fingerprint=<key-fingerprint>
tenancy=<tenancy-ocid>
region=<region>
key_file=~/.oci/oci-private.pem
```

- If you have created any buckets, you can list them using the following command

```bash
╰─ oci os bucket list --compartment-id <compartment-ocid> | jq '.data[] | {name}'

# Alternatively you can pass the config file as an argument as well
╰─ oci os bucket list --compartment-id <compartment-ocid> --config-file <config-file-path> | jq '.data[] | {name}'
```

================================================
FILE: home/cloud_providers/oci/taskset_oci_cloud_providers/task_001_oci_cli_commands/ReadMe.md
================================================

# OCI Commands

| oci Command / Options | Use Case | Example |
|-----------------------|----------|---------|
| [create](https://docs.oracle.com/en-us/iaas/tools/oci-cli/2.9.1/oci_cli_docs/cmdref/os/bucket/create.html) | Create an OCI bucket | `oci os bucket create --compartment-id <compartment-ocid> --name sandbox-v1-bucket` |
| [list](https://docs.oracle.com/en-us/iaas/tools/oci-cli/3.16.0/oci_cli_docs/cmdref/os/bucket/list.html) | List OCI buckets | `oci os bucket list --compartment-id <compartment-ocid>` |

================================================
FILE: home/cloud_providers/oci/taskset_oci_cloud_providers/task_002_create_k8s_quick_create/ReadMe.md
================================================

# Create a k8s cluster in OCI using the Quick Create feature and understand the required network topology

We can later use this to create our own cluster using Terraform.

## K8S created

![](.images/k8s.png)

## The VCN Created

![img.png](.images/vcn.png)

oke-vcn-quick-sandbox-k8s-a4bf5e044

- IPV4 CIDR `10.0.0.0/16`
- Default route table `oke-public-routetable-sandbox-k8s-a4bf5e044`

## Subnets

![](.images/subnets.png)

oke-nodesubnet-quick-sandbox-k8s-a4bf5e044-regional

- CIDR `10.0.10.0/24`
- Private

### Security List

oke-nodeseclist-quick-sandbox-k8s-a4bf5e044

#### Ingress

![](.images/ingress-subnet-pri.png)

- 10.0.10.0/24 --------------All Protocols----->
  > All traffic for all ports
  > Allow pods on one worker node to communicate with pods on other worker nodes.
Anything from within the subnet can communicate with one another - 10.0.0.0/28 --------------ICMP -------------> > Path discovery. Anything from API endpoint subnet can communicate over ICMP - 10.0.0.0/28 All Ports --------------All Protocols-----------> All Ports > TCP traffic for ports: All > TCP access from Kubernetes Control Plane. Anything coming from API endpoint subnet can communicate over TCP, all source ports to all destination ports - 0.0.0.0/0 All Ports ---------------TCP-----------------------> 22 > TCP traffic for ports: 22 SSH Remote Login Protocol. Inbound SSH traffic to worker nodes. Anything coming from Internet can communicate over TCP 22 for SSH access #### Egress ![](.images/egress-subnet-pri.png) - All ports--------ALL protocols---------> 10.0.10.0/24 (All ports). > Anything from within the subnet can communicate with one another. Allow pods on one worker node to communicate with pods on other worker nodes - ALL Ports--------TCP -------> 10.0.0.0/28 - Port 6443 (API subnet) > All ports can communicate to port 6443 of API subnet. Access to Kubernetes API Endpoint. - ALL Ports--------TCP -------> 10.0.0.0/28 - Port 12250 (API subnet) > All ports can communicate to port 12250 of API subnet. Kubernetes worker to control plane communication - -----------------ICMP--------> 10.0.0.0/28 > Path Discovery - All ports ----------------TCP-------------> All BOM Services In Oracle Services Network, 443 port > Allow nodes to communicate with OKE to ensure correct start-up and continued functioning - ----------------ICMP------------> 0.0.0.0/0 > ICMP Access from Kubernetes Control Plane - All ports ----------------All protocols---------------> 0.0.0.0/0, All ports > Worker Nodes access to Internet ### RouteTable oke-private-routetable-sandbox-k8s-a4bf5e044 - If -------------> 0.0.0.0/0, go to NAT Gateway > traffic to the internet - If -------------> All BOM Services In Oracle Services Network, go to Service Gateway > traffic to OCI services oke-k8sApiEndpoint-subnet-quick-sandbox-k8s-a4bf5e044-regional - CIDR `10.0.0.0/28` - Public ### Security List oke-k8sApiEndpoint-quick-sandbox-k8s-a4bf5e044 #### Ingress ![img.png](.images/ingress-api-subnet.png) - 0.0.0.0/0 All Ports ---------------TCP-----------------> 6443 > TCP traffic for ports: 6443 > External access to Kubernetes API endpoint - 10.0.10.0/24 All Ports ------------TCP-----------------> 6443 > TCP traffic for ports: 6443 > Kubernetes worker to Kubernetes API endpoint communication - 10.0.10.0/24 All Ports -----------TCP--------------------> 12250 > TCP traffic for ports: 12250 > Kubernetes worker to control plane communication - 10.0.10.0/24 -----------ICMP-------------------> > Path Discovery #### Egress ![img.png](.images/egress-api-subnet.png) - All Ports ------------------------TCP-----------------------> All BOM Services In Oracle Services Network, 443 > TCP traffic for ports: 443 HTTPS > Allow Kubernetes Control Plane to communicate with OKE - All Ports ------------------------TCP-----------------------> 10.0.10.0/24, All Ports > TCP traffic for ports: All > All traffic to worker nodes - ------------------------ICMP----------------------> 10.0.10.0/24 > Path discovery ### Route Table oke-public-routetable-sandbox-k8s-a4bf5e044 - If ------------> 0.0.0.0/0, then go to Internet Gateway > traffic to/from internet oke-svclbsubnet-quick-sandbox-k8s-a4bf5e044-regional - CIDR `10.0.20.0/24` - Public ### Security List oke-svclbseclist-quick-sandbox-k8s-a4bf5e044 #### Ingress ![img.png](.images/ingress-lb-sec.png) #### Egress 
![img.png](.images/eggress-lb-sec.png) ### Route Table oke-public-routetable-sandbox-k8s-a4bf5e044 ================================================ FILE: home/containers/docker/ReadMe.md ================================================ # taskset_docker_containers > [Auto](https://github.com/codeaprendiz/learn_fullstack/blob/main/home/php/intermediate/taskset_intermediate_php/task_004_createGlobalMarkdownTable/generate-readme.php) generated ReadMe. Number of tasks: 33 | Task | Description | |----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | task_000 | [task_000_commands](taskset_docker_containers/task_000_commands) | | task_001 | [task_001_docker_overview](taskset_docker_containers/task_001_docker_overview) | | task_002 | [task_002_docker_run__detached_vs_foreground](taskset_docker_containers/task_002_docker_run__detached_vs_foreground) | | task_003 | [task_003_getting_started](taskset_docker_containers/task_003_getting_started) | | task_004 | [task_004_docker_run__pid_setting_and_choosing_image_with_tag](taskset_docker_containers/task_004_docker_run__pid_setting_and_choosing_image_with_tag) | | task_005 | [task_005_docker_run__assign_name_and_allocate_pseudo_tty](taskset_docker_containers/task_005_docker_run__assign_name_and_allocate_pseudo_tty) | | task_006 | [task_006_docker_run__expose_port_and_pull_policy_and_environment_vars](taskset_docker_containers/task_006_docker_run__expose_port_and_pull_policy_and_environment_vars) | | task_006 | [task_006_run_commands_in_container](taskset_docker_containers/task_006_run_commands_in_container) | | task_007 | [task_007_docker_run__full_container_capabilities_and_set_working_dir_and_volume_mounts](taskset_docker_containers/task_007_docker_run__full_container_capabilities_and_set_working_dir_and_volume_mounts) | | task_008 | [task_008_docker_run__hostsfile_ulimit_mem_limit](taskset_docker_containers/task_008_docker_run__hostsfile_ulimit_mem_limit) | | task_008 | [task_008_docker_run__metadata_and_network_and_attach_to_stdout](taskset_docker_containers/task_008_docker_run__metadata_and_network_and_attach_to_stdout) | | task_009 | [task_009_docker_attach](taskset_docker_containers/task_009_docker_attach) | | task_010 | [task_010_docker_build](taskset_docker_containers/task_010_docker_build) | | task_011 | [task_011_mongo](taskset_docker_containers/task_011_mongo) | | task_012 | [task_012_postgres](taskset_docker_containers/task_012_postgres) | | task_013 | [task_013_prometheus_blackbox_exporter](taskset_docker_containers/task_013_prometheus_blackbox_exporter) | | task_014 | [task_014_elastic_search](taskset_docker_containers/task_014_elastic_search) | | task_015 | [task_015_elastic_search_bkp_restore](taskset_docker_containers/task_015_elastic_search_bkp_restore) | | task_016 | [task_016_elastic_search_backup_restore_sample_data](taskset_docker_containers/task_016_elastic_search_backup_restore_sample_data) | | task_017 | [task_017_mem_and_cpu_limit_container](taskset_docker_containers/task_017_mem_and_cpu_limit_container) | | task_018 | [task_018_mysql](taskset_docker_containers/task_018_mysql) | | task_019 | [task_019_nginx_https_domain_test](taskset_docker_containers/task_019_nginx_https_domain_test) | | task_020 | [task_020_docker_commit](taskset_docker_containers/task_020_docker_commit) | | task_021 | [task_021_kibana](taskset_docker_containers/task_021_kibana) | | task_022 | 
[task_022_sample_app](taskset_docker_containers/task_022_sample_app) |
| task_023 | [task_023_update_sample_app](taskset_docker_containers/task_023_update_sample_app) |
| task_024 | [task_024_sample_app_persist_db](taskset_docker_containers/task_024_sample_app_persist_db) |
| task_025 | [task_025_sonarqube__keycloak__saml](taskset_docker_containers/task_025_sonarqube__keycloak__saml) |
| task_026 | [task_026_mssql](taskset_docker_containers/task_026_mssql) |
| task_027 | [task_027_docker_openvpn](taskset_docker_containers/task_027_docker_openvpn) |
| task_028 | [task_028_docker_engine__networking__overview](taskset_docker_containers/task_028_docker_engine__networking__overview) |
| task_029 | [task_029_docker_engine__networking__bridge_network_tutorial](taskset_docker_containers/task_029_docker_engine__networking__bridge_network_tutorial) |
| task_030 | [task_030_docker_engine__networking__host_networking_tutorial](taskset_docker_containers/task_030_docker_engine__networking__host_networking_tutorial) |

================================================
FILE: home/containers/docker/taskset_docker_containers/task_000_commands/ReadMe.md
================================================
# docker commands

More details at the [Official Guide](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands)

- [docker commands](#docker-commands)
  - [image](#image)
    - [rm \[id-of-the-image\]](#rm-id-of-the-image)
  - [images](#images)
  - [run](#run)
    - [interactive](#interactive)
    - [tty](#tty)

## image

### rm [id-of-the-image]

To remove the image with a specific ID:

```bash
$ sudo docker images | grep ubuntu
Password:
ubuntu latest 4e5021d210f6 2 weeks ago 64.2MB
$ sudo docker image rm 4e5021d210f6
Untagged: ubuntu:latest
Untagged: ubuntu@sha256:bec5a2727be7fff3d308193cfde3491f8fba1a2ba392b7546b43a051853a341d
Deleted: sha256:4e5021d210f65ebe915670c7089120120bc0a303b90208592851708c1b8c04bd
Deleted: sha256:1d9112746e9d86157c23e426ce87cc2d7bced0ba2ec8ddbdfbcc3093e0769472
Deleted: sha256:efcf4a93c18b5d01aa8e10a2e3b7e2b2eef0378336456d8653e2d123d6232c1e
Deleted: sha256:1e1aa31289fdca521c403edd6b37317bf0a349a941c7f19b6d9d311f59347502
Deleted: sha256:c8be1b8f4d60d99c281fc2db75e0f56df42a83ad2f0b091621ce19357e19d853
```

## images

To show all the images present:

```bash
$ sudo docker images
Password:
REPOSITORY TAG IMAGE ID CREATED SIZE
ubuntu latest 4e5021d210f6 2 weeks ago 64.2MB
busybox latest 83aa35aa1c79 3 weeks ago 1.22MB
```

## run

### interactive

> --interactive , -i
>
> Keep STDIN open even if not attached

```bash
$ sudo docker run -i ubuntu:latest bash
pwd
/
exit
$
```

### tty

> --tty , -t
>
> Allocate a pseudo-TTY

You have to kill the container externally in this case.

```bash
$ sudo docker run -t ubuntu:latest bash
root@b01ba82675f5:/# pwd
ls
exit
^C^C
root@b01ba82675f5:/# exit
```

When you combine -i and -t, you get a proper terminal-like experience:

```bash
$ sudo docker run -i -t ubuntu:latest bash
Unable to find image 'ubuntu:latest' locally
latest: Pulling from library/ubuntu
5bed26d33875: Pull complete
f11b29a9c730: Pull complete
930bda195c84: Pull complete
78bf9a5ad49e: Pull complete
Digest: sha256:bec5a2727be7fff3d308193cfde3491f8fba1a2ba392b7546b43a051853a341d
Status: Downloaded newer image for ubuntu:latest
root@e421090e426a:/#
```
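A related pair of cleanup commands worth knowing (a small addition, not part of the original notes; both are standard docker subcommands):

```bash
# Remove all stopped containers, then all dangling images;
# -f skips the interactive confirmation prompt
sudo docker container prune -f
sudo docker image prune -f
```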
================================================
FILE: home/containers/docker/taskset_docker_containers/task_001_docker_overview/ReadMe.md
================================================
# Docker overview

[docs.docker.com/get-started/overview](https://docs.docker.com/get-started/overview)

Version Stack

| Stack | Version |
|--------|----------|
| Docker | 20.10.14 |

## Use case to solve an actual problem

### docker run

The following command runs an `ubuntu` container, attaches interactively to your local command-line session, and runs `/bin/bash`.

```bash
# Version
❯ docker -v
Docker version 20.10.14, build a224086

# run : Run a command in a new container
# -i : interactively
# -t : attached to your terminal
# ubuntu : the image we will be downloading
# /bin/bash : the command that will run inside the container started from the ubuntu image
❯ docker run -i -t ubuntu /bin/bash
root@f3d2356faadc:/# ls
bin boot dev etc home lib media mnt opt proc root run sbin srv sys tmp usr var
# exit
❯ docker ps # To show all running containers
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
❯ docker ps -a # To show all containers including stopped ones
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
864a03e87269 ubuntu "sh" 23 seconds ago Exited (0) 17 seconds ago unruffled_margulis
❯ docker rm unruffled_margulis # Remove the container
unruffled_margulis
❯ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
❯
```

Docker starts the container and executes /bin/bash. Because the container is running interactively and attached to your terminal (due to the -i and -t flags), you can provide input using your keyboard while the output is logged to your terminal.

That's all for today!

================================================
FILE: home/containers/docker/taskset_docker_containers/task_002_docker_run__detached_vs_foreground/ReadMe.md
================================================
## Detached vs Foreground

- [docs.docker.com/engine/reference/run](https://docs.docker.com/engine/reference/run)
- [docs.docker.com/engine/reference/commandline/run](https://docs.docker.com/engine/reference/commandline/run)

Version Stack

| Stack | Version |
|--------|----------|
| Docker | 20.10.14 |

By design, containers started in detached mode exit when the root process used to run the container exits.

### Detached Mode

```bash
# Note: no --rm option
❯ docker run -d -p 80:80 nginx service nginx start
3fdd6761951aeba2a8936a54a7fea982b1b7073a0d2892cab9a4c095d080900b

# Note the container exited after starting
❯ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
3fdd6761951a nginx "/docker-entrypoint.…" 4 seconds ago Exited (0) 4 seconds ago wonderful_fermi

# Note: we added the --rm option
❯ docker run --rm -d -p 80:80 nginx service nginx start
e836a7703057577b1aa58ac5cf9ca4e9bb85767069651f9fd8ac1972c4d041c0

# This time the container was also removed after it stopped
❯ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
❯

# You can start an ubuntu container in detached mode as well
❯ docker run -d ubuntu /bin/bash
63e90449bddb96856fb2cebcb33c5b8f12859ac59bea39645d5c9877215a8cac
❯ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
63e90449bddb ubuntu "/bin/bash" 2 seconds ago Exited (0) 1 second ago distracted_pasteur
❯ docker run -d ubuntu sleep 100
a979bd34e5c4d34e8dcc30c464ed3b432fe77938ae3df3ea3983da24d0c649c4
❯ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
a979bd34e5c4 ubuntu "sleep 100" 3 seconds ago Up 3 seconds sweet_wozniak
63e90449bddb ubuntu "/bin/bash" 30 seconds ago Exited (0) 29 seconds ago distracted_pasteur
```
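When a detached container exits immediately, like the `/bin/bash` one above, two standard commands reveal how and why it died. A minimal sketch; `distracted_pasteur` is the auto-generated name from the listing above:

```bash
# Exit code recorded for the stopped container
docker inspect -f '{{.State.ExitCode}}' distracted_pasteur
# Whatever the root process wrote to stdout/stderr before exiting
docker logs distracted_pasteur
```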
### Foreground Mode

In foreground mode (the default when -d is not specified), docker run starts the process in the container and attaches the console to the process's standard input, output, and standard error.

For interactive processes (like a shell), you must use -i and -t together in order to allocate a TTY for the container process.

```bash
❯ docker run -it ubuntu /bin/bash
root@6036032b640a:/# ls
bin dev home media opt root sbin sys usr
boot etc lib mnt proc run srv tmp var
root@6036032b640a:/# exit
exit
❯
```

================================================
FILE: home/containers/docker/taskset_docker_containers/task_003_getting_started/ReadMe.md
================================================
[Getting Started](https://docs.docker.com/get-started/)

- Run

```bash
$ docker run -d -p 80:80 docker/getting-started
Unable to find image 'docker/getting-started:latest' locally
latest: Pulling from docker/getting-started
df9b9388f04a: Pull complete
5867cba5fcbd: Pull complete
4b639e65cb3b: Pull complete
061ed9e2b976: Pull complete
bc19f3e8eeb1: Pull complete
4071be97c256: Pull complete
79b586f1a54b: Pull complete
0c9732f525d6: Pull complete
Digest: sha256:b558be874169471bd4e65bd6eac8c303b271a7ee8553ba47481b73b2bf597aae
Status: Downloaded newer image for docker/getting-started:latest
4fb7848e41a1f4135e029b438f3e0fe424dbe5d458618b625128c8f72013b1ff
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4fb7848e41a1 docker/getting-started "/docker-entrypoint.…" 2 minutes ago Up 2 minutes 0.0.0.0:80->80/tcp quirky_rosalind
```

================================================
FILE: home/containers/docker/taskset_docker_containers/task_004_docker_run__pid_setting_and_choosing_image_with_tag/ReadMe.md
================================================
## PID setting and choosing an image with a specific tag

Version Stack

| Stack | Version |
|--------|----------|
| Docker | 20.10.14 |

### ImageTag

[imagetag](https://docs.docker.com/engine/reference/run/#imagetag)

- Run an image with a specific tag, say ubuntu:14.04

```bash
❯ docker run --rm -it -d ubuntu:14.04 sh
32bd86340d4773b17d5a9ba5c2f8f448ab4d29186801a6d989ad53a2a0a48af3
❯ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
32bd86340d47 ubuntu:14.04 "sh" 5 seconds ago Up 4 seconds practical_ishizaka
```

### PID

[pid-settings---pid](https://docs.docker.com/engine/reference/run/#pid-settings---pid)

Let's create two containers, a1 and a2, where we want container a2 to be able to see the processes running in container a1.

```bash
# Terminal session 1
❯ docker run --rm --name=a1 -it ubuntu /bin/bash
root@fefc7f52750f:/# sleep 10000

# Terminal session 2
❯ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
fefc7f52750f ubuntu "/bin/bash" 33 seconds ago Up 32 seconds a1

# Note that a2 cannot see the processes running inside of a1 yet.
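# (Why: each container normally gets its own isolated PID namespace, so the
#  plain a2 below sees only its own shell; --pid=container:a1 makes a2 join
#  a1's PID namespace instead.)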
❯ docker run --rm --name=a2 -it ubuntu /bin/bash root@9630d2dd813f:/# ps -ef UID PID PPID C STIME TTY TIME CMD root 1 0 0 10:34 pts/0 00:00:00 /bin/bash root 9 1 0 10:34 pts/0 00:00:00 ps -ef root@9630d2dd813f:/# #let's exit root@9630d2dd813f:/# exit exit ❯ docker ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES fefc7f52750f ubuntu "/bin/bash" About a minute ago Up About a minute a1 # Now let's start the second container a2 using pid=container:a1 # Note that now it's able to see the process sleep running in a1 ❯ docker run --rm --name=a2 --pid=container:a1 -it ubuntu /bin/bash root@0bfaed14e83d:/# ps -ef UID PID PPID C STIME TTY TIME CMD root 1 0 0 10:33 pts/0 00:00:00 /bin/bash root 10 1 0 10:33 pts/0 00:00:00 sleep 10000 root 11 0 0 10:35 pts/0 00:00:00 /bin/bash root 20 11 0 10:35 pts/0 00:00:00 ps -ef ``` ================================================ FILE: home/containers/docker/taskset_docker_containers/task_005_docker_run__assign_name_and_allocate_pseudo_tty/ReadMe.md ================================================ ## Assign name and allocate pseudo tty [assign-name-and-allocate-pseudo-tty---name--it](https://docs.docker.com/engine/reference/commandline/run/#assign-name-and-allocate-pseudo-tty---name--it) Version Stack | Stack | Version | |--------|----------| | Docker | 20.10.14 | - Let's begin ```bash # allocate pseudo tty : -it # --name for container name ❯ docker run --name test -it debian root@d6c0fe130dba:/# exit 13 # Note the exit code is passed to the caller of docker run. ❯ echo $? 13 ❯ docker ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 4a94928d6520 debian "bash" About a minute ago Exited (13) 54 seconds ago test ``` ## Capture container ID [capture-container-id---cidfile](https://docs.docker.com/engine/reference/commandline/run/#capture-container-id---cidfile) - This will create a container and print `test` to the console. The `cidfile` flag makes Docker attempt to create a new file and write the container ID to it. 
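Once captured, the ID is typically read back by scripts to drive other commands. A minimal sketch, assuming the demo below has already been run so `/tmp/docker_test.cid` exists:

```bash
# Feed the captured container ID to other docker commands
CID=$(cat /tmp/docker_test.cid)
docker inspect -f '{{.Name}} {{.State.Status}}' "$CID"
docker rm "$CID"
# docker run refuses to reuse an existing cidfile, so remove it as well
rm /tmp/docker_test.cid
```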
```bash
❯ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
test
❯ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
c2683f80d7bf ubuntu "echo test" 5 seconds ago Exited (0) 4 seconds ago youthful_hypatia

# Note the container ID got captured in the file
❯ cat /tmp/docker_test.cid
c2683f80d7bf613f4004911904a908377a43fb1ab556988f3aa9992647cd184a
```

================================================
FILE: home/containers/docker/taskset_docker_containers/task_006_docker_run__expose_port_and_pull_policy_and_environment_vars/ReadMe.md
================================================
## Expose Port, Pull Policy And Environment Variables

Version Stack

| Stack | Version |
|--------|----------|
| Docker | 20.10.14 |

### Expose Port

[publish-or-expose-port--p---expose](https://docs.docker.com/engine/reference/commandline/run/#publish-or-expose-port--p---expose)

```bash
❯ docker run --rm -d -p 8081:80 nginx nginx -g 'daemon off;'
f73315b8a038d94192802c894c72fa3957ca4db019f312e829c3a612fbf17d63
❯ curl localhost:8081 -I
HTTP/1.1 200 OK
Server: nginx/1.23.2
Date: Mon, 14 Nov 2022 14:59:15 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Wed, 19 Oct 2022 07:56:21 GMT
Connection: keep-alive
ETag: "634fada5-267"
Accept-Ranges: bytes
```

You can visit the same page in the browser

![nginx.png](.images/nginx.png)

## Pull Policy

[set-the-pull-policy---pull](https://docs.docker.com/engine/reference/commandline/run/#-set-the-pull-policy---pull)

```bash
❯ docker pull ubuntu
❯ docker images | grep ubuntu
ubuntu latest 3c2df5585507 2 weeks ago 69.2MB
❯ docker rmi ubuntu
❯ docker run --pull=never ubuntu
docker: Error response from daemon: No such image: ubuntu:latest.
# As there is no image with this tag locally
❯ docker images | grep ubuntu | wc -l
0
```

## Environment

[set-environment-variables--e---env---env-file](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file)

```bash
# Let's create a file
❯ echo "VAR3=value" > .env
❯ cat .env
VAR3=value
❯ docker run --rm -it -e MYVAR1=bar --env MYVAR2=foo --env-file ./.env ubuntu bash
root@9cb685c53176:/# env | grep VAR
MYVAR2=foo
MYVAR1=bar
VAR3=value
root@9cb685c53176:/# exit
exit
❯ docker run --rm -it -e MYVAR1=bar --env MYVAR2=foo --env-file ./.env ubuntu env | grep VAR
VAR3=value
MYVAR1=bar
MYVAR2=foo
```

================================================
FILE: home/containers/docker/taskset_docker_containers/task_006_run_commands_in_container/ReadMe.md
================================================
# To execute a command inside a container

> docker run -it [image-name:tag] [command-to-execute]

```bash
$ docker run -it ubuntu:latest bash
Unable to find image 'ubuntu:latest' locally
latest: Pulling from library/ubuntu
5bed26d33875: Pull complete
f11b29a9c730: Pull complete
930bda195c84: Pull complete
78bf9a5ad49e: Pull complete
Digest: sha256:bec5a2727be7fff3d308193cfde3491f8fba1a2ba392b7546b43a051853a341d
Status: Downloaded newer image for ubuntu:latest
root@e421090e426a:/#
```

================================================
FILE: home/containers/docker/taskset_docker_containers/task_007_docker_run__full_container_capabilities_and_set_working_dir_and_volume_mounts/ReadMe.md
================================================
## Full container capabilities, Set working directory And Mount Volumes

Version Stack

| Stack | Version |
|--------|----------|
| Docker | 20.10.14 |

### Full container capabilities
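A quick way to see concretely what this flag grants (a small sketch, not part of the original task): `CapEff` is the effective-capability bitmask that every Linux process exposes under /proc, and it differs sharply between the two runs. The official mount demo follows below.

```bash
# Compare PID 1's capability mask without and with --privileged;
# the privileged run shows far more bits set
docker run --rm ubuntu grep CapEff /proc/1/status
docker run --rm --privileged ubuntu grep CapEff /proc/1/status
```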
[full-container-capabilities---privileged](https://docs.docker.com/engine/reference/commandline/run/#full-container-capabilities---privileged)

```bash
❯ docker run -t -i --rm ubuntu bash
root@ae4994d93a27:/# mount -t tmpfs none /mnt
mount: /mnt: permission denied.
root@ae4994d93a27:/# exit
exit
❯ docker run -t -i --privileged ubuntu bash
root@6ad992ced205:/# mount -t tmpfs none /mnt
root@6ad992ced205:/# df -h | egrep "Filesystem|tmpfs"
Filesystem Size Used Avail Use% Mounted on
tmpfs 64M 0 64M 0% /dev
```

### Set working directory

[set-working-directory--w](https://docs.docker.com/engine/reference/commandline/run/#set-working-directory--w)

```bash
❯ docker run -w /path/to/dir/ -i -t ubuntu pwd
/path/to/dir
```

### Mount Volumes

[mount-volume](https://docs.docker.com/engine/reference/commandline/run/#mount-volume--v---read-only)

```bash
## Terminal session 1
❯ ls
ReadMe.md
# Note that the container also sees the file ReadMe.md, since we mounted
# the current directory into the container at the same path
❯ docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu ls
ReadMe.md

## Terminal session 2
# Let's create another directory and see
❯ mkdir -p /tmp/test
❯ cd /tmp/test
❯ touch test.txt

# Terminal session 1
❯ docker run --rm -v /tmp/test:/foo -w /foo -i -t ubuntu bash
# Let's see if the container can see the file
root@0e19af311731:/foo# ls
test.txt
root@0e19af311731:/foo# exit
exit

## The same can also be achieved using the --mount flag
❯ docker run -t -i -w /foo --mount type=bind,src=/tmp/test,dst=/foo busybox sh
/foo # ls
test.txt
/foo # touch newfile.txt
/foo # exit
❯ ls /tmp/test
newfile.txt test.txt
```

================================================
FILE: home/containers/docker/taskset_docker_containers/task_008_docker_run__hostsfile_ulimit_mem_limit/ReadMe.md
================================================
# Hosts file, ulimit, memory limit

## Hosts file

[add-entries-to-container-hosts-file---add-host](https://docs.docker.com/engine/reference/commandline/run/#add-entries-to-container-hosts-file---add-host)

```bash
## get IP from ping google.com
❯ docker run --add-host=myhost:142.250.181.78 --rm -it alpine
/ # ping myhost
PING myhost (142.250.181.78): 56 data bytes
64 bytes from 142.250.181.78: seq=0 ttl=37 time=19.929 ms
```

## ulimit

[set-ulimits-in-container---ulimit](https://docs.docker.com/engine/reference/commandline/run/#set-ulimits-in-container---ulimit)

```bash
❯ docker run --rm debian sh -c "ulimit -n"
1048576
❯ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n"
1024
```

## Memory limit

[specify-hard-limits-on-memory-available-to-containers--m---memory](https://docs.docker.com/engine/reference/commandline/run/#specify-hard-limits-on-memory-available-to-containers--m---memory)

```bash
## Terminal session 1
❯ docker run --rm -it ubuntu
## Terminal session 2
❯ docker stats

## Terminal session 1
❯ docker run --rm -it --memory="2g" ubuntu
## Terminal session 2
❯ docker stats
```

================================================
FILE: home/containers/docker/taskset_docker_containers/task_008_docker_run__metadata_and_network_and_attach_to_stdout/ReadMe.md
================================================
## Metadata, Network and Attach to stdout/stdin/stderr

### Set metadata on container

[set-metadata-on-container--l---label---label-file](https://docs.docker.com/engine/reference/commandline/run/#set-metadata-on-container--l---label---label-file)

```bash
# Terminal session 1
❯ docker run --rm -l my-label --label com.example.foo=bar -it ubuntu bash
root@877169a9c393:/#

# Terminal session 2
❯ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
877169a9c393 ubuntu "bash" 27 seconds ago Up 27 seconds lucid_heyrovsky
❯ docker inspect lucid_heyrovsky | egrep "my-label|com"
"com.example.foo": "bar",
"my-label": ""

# Terminal session 1
root@877169a9c393:/# exit
exit
❯
```

### Connect to network

Let's create a network and connect two containers, `a1` and `a2`, to it. They should then be able to ping each other by name.

[connect-a-container-to-a-network---network](https://docs.docker.com/engine/reference/commandline/run/#connect-a-container-to-a-network---network)

```bash
# Terminal session 1
# Create a network
❯ docker network create mynet
❯ docker network ls
NETWORK ID NAME DRIVER SCOPE
d94bd0d8c825 bridge bridge local
4173677fe745 host host local
92def2900117 mynet bridge local
2328a0a37fde none null local
❯ docker run --rm --name=a1 -it --network=mynet busybox

# Terminal session 2
❯ docker run --rm --name=a2 -it --network=mynet busybox
## Note they are able to ping each other with just container names
/ # ping a1
PING a1 (172.18.0.2): 56 data bytes
64 bytes from 172.18.0.2: seq=0 ttl=64 time=0.296 ms
64 bytes from 172.18.0.2: seq=1 ttl=64 time=0.371 ms
.
/ # exit
❯
```

## Attach to stdout

[attach-to-stdinstdoutstderr--a](https://docs.docker.com/engine/reference/commandline/run/#attach-to-stdinstdoutstderr--a)

Let's do an example of attaching to a container's stdout.

```bash
# The following attaches stdout to your container,
# so the output generated by the container is displayed on your terminal
❯ docker run -a stdout --rm --name ubuntu ubuntu ls var
backups cache lib local lock log mail opt run spool tmp
```

================================================
FILE: home/containers/docker/taskset_docker_containers/task_009_docker_attach/ReadMe.md
================================================
# docker attach

[https://docs.docker.com/engine/reference/commandline/attach/](https://docs.docker.com/engine/reference/commandline/attach/)

Once you are attached to a container using docker attach:

- To stop the container, use CTRL-c. This key sequence sends SIGKILL to the container.
- If the container was run with -i and -t, you can detach from the container and leave it running using the CTRL-p CTRL-q key sequence.

## Attach to and detach from a running container

```bash
❯ docker run -d --name topdemo ubuntu:22.04 /usr/bin/top -b
❯ docker attach topdemo
# pressing CTRL-c terminates the container
❯ docker ps -a
```

- Repeating the example above, but this time with the -i and -t options set:

```bash
❯ docker run -dit --name topdemo2 ubuntu:22.04 /usr/bin/top -b
❯ docker attach topdemo2
# pressing CTRL-p CTRL-q detaches us from the container,
# and the container keeps running
❯ docker ps -a
```

## Get the exit code of the container's command

- The exit code returned by the shell process inside the container is also returned by the docker attach command to its caller:

```bash
❯ docker run --name test -dit alpine
❯ docker attach test
/ # exit 13
❯ echo $?
13
❯ docker ps -a --filter name=test
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
7828441ca97a alpine "/bin/sh" 48 seconds ago Exited (13) 31 seconds ago test
```

- In the example above, a container is started from the alpine image in detached mode, and docker attach is then used to attach to it. Inside the container, the exit 13 command causes the shell process to exit with a status code of 13.
- docker attach returns that same exit code to its caller (the shell where it was run): if the container's main process exits with a non-zero status, docker attach exits non-zero too. This lets you check, from outside, the exit status of a command that ran inside a container, which is useful for scripting and automation.
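The same exit status can also be read without attaching at all, which is what scripts usually do. A minimal sketch; the container name `exitdemo` is made up for this example:

```bash
# Run a container whose shell exits with a known code
docker run --name exitdemo alpine sh -c "exit 13"
# docker wait prints the container's exit code (immediately here, since it already stopped)
docker wait exitdemo
# The same value is recorded in the container metadata
docker inspect -f '{{.State.ExitCode}}' exitdemo
docker rm exitdemo
```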
================================================
FILE: home/containers/docker/taskset_docker_containers/task_010_docker_build/Dockerfile
================================================
FROM nginx:latest
COPY index.html /usr/share/nginx/html/

================================================
FILE: home/containers/docker/taskset_docker_containers/task_010_docker_build/ReadMe.md
================================================
# docker build

[docs.docker.com/engine/reference/commandline/build](https://docs.docker.com/engine/reference/commandline/build/)

**High Level Objectives**

- run the nginx docker image on port 8080
- create a custom Dockerfile with some changes
- build an image with a specific tag
- run a container that uses the new image
- understand the differences

**Skills**

- docker
- docker build
- tag
- docker images

**Version Stack**

| Stack | Version |
|--------|----------|
| docker | 20.10.14 |

## run nginx container

```bash
❯ docker run -it --rm -d -p 8080:80 --name nginx nginx
a6d8a4f9987c77a27c4f7864b82d86a1a1c0a899bf79bd3b70ef893ae74cf92d
❯ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
a6d8a4f9987c nginx "/docker-entrypoint.…" 4 seconds ago Up 4 seconds 0.0.0.0:8080->80/tcp nginx
❯ curl localhost:8080

Welcome to nginx!
If you see this page, the nginx web server is successfully installed and working. Further configuration is required.
For online documentation and support please refer to nginx.org.
Commercial support is available at nginx.com.
Thank you for using nginx.
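# (Tip, not in the original notes: `curl -I localhost:8080` returns just the
#  response headers, handy for a quick health check.)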
❯ docker exec -it nginx bash
root@41e0b0f9b11b:/# ls /usr/share/nginx/html/
50x.html index.html
root@41e0b0f9b11b:/# cat /usr/share/nginx/html/index.html
Welcome to nginx!
If you see this page, the nginx web server is successfully installed and working. Further configuration is required.
For online documentation and support please refer to nginx.org.
Commercial support is available at nginx.com.
Thank you for using nginx.
```

## Create Dockerfile and index.html

## Build

```bash
❯ docker build -f Dockerfile .
```

## Check images

```bash
❯ docker images | head -n 2
REPOSITORY TAG IMAGE ID CREATED SIZE
9a69d38ae721 32 seconds ago 135MB
```

## Tag

```bash
❯ docker build -t my-docker-image -f Dockerfile .
❯ docker images | head -n 2
REPOSITORY TAG IMAGE ID CREATED SIZE
my-docker-image latest 9a69d38ae721 About a minute ago 135MB
```

## Run image with new container

```bash
❯ docker ps -a | grep -v "IMAGE" | awk '{ print $NF}' | xargs docker stop
nginx
❯ docker run -it --rm -d -p 8080:80 --name nginx my-docker-image:latest
1535f427e222e04b71d91c7b4ccb740ec2d1fb4624828bb4f145e303fc1815c3
❯ curl localhost:8080

Hello, Docker!
Welcome to my Docker container!
❯ docker exec -it nginx bash
root@7e144aa1d8ec:/# cat /usr/share/nginx/html/index.html
Hello, Docker!
Welcome to my Docker container!
root@7e144aa1d8ec:/#
```

================================================
FILE: home/containers/docker/taskset_docker_containers/task_010_docker_build/index.html
================================================
Hello, Docker!
Welcome to my Docker container!
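To close the loop on the "understand the differences" objective from the ReadMe above, a short sketch (not part of the original task) using two standard docker commands; `my-docker-image` and `nginx` are the image tag and container name used in that task:

```bash
# Show the layers our build added on top of nginx:latest
docker history my-docker-image:latest
# Show filesystem changes in the running container relative to its image
docker diff nginx
```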
================================================ FILE: home/containers/docker/taskset_docker_containers/task_011_mongo/ReadMe.md ================================================ [Referenced link](https://www.code4it.dev/blog/run-mongodb-on-docker) ```bash $ docker run -d --name mongo-on-docker -p 27888:27017 -e MONGO_INITDB_ROOT_USERNAME=mongoadmin -e MONGO_INITDB_ROOT_PASSWORD=secret mongo 0f4060b2b64f35642a4988c5dde5eb9461f87de36ac297b10142a2701269ba8f ``` - While connecting using the client, the following details are required ```bash Server: localhost Port: 27888 Username: mongoadmin Password: secret ``` - You can use the following to connect to the shell ```bash $ mongo --username mongoadmin --password secret --port 27888 --host 127.0.0.1 MongoDB shell version v4.4.3 connecting to: mongodb://127.0.0.1:27888/?compressors=disabled&gssapiServiceName=mongodb Implicit session: session { "id" : UUID("7e4758b6-a551-4f20-bece-584fb042996d") } MongoDB server version: 4.4.4 --- The server generated these startup warnings when booting: 2021-05-30T07:21:09.404+00:00: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine. See http://dochub.mongodb.org/core/prodnotes-filesystem --- --- Enable MongoDB's free cloud-based monitoring service, which will then receive and display metrics about your deployment (disk utilization, CPU, operation statistics, etc). The monitoring data will be available on a MongoDB website with a unique URL accessible to you and anyone you share the URL with. MongoDB may use this information to make product improvements and to suggest MongoDB products and deployment options to you. To enable free monitoring, run the following command: db.enableFreeMonitoring() To permanently disable this reminder, run the following command: db.disableFreeMonitoring() --- > ``` ================================================ FILE: home/containers/docker/taskset_docker_containers/task_012_postgres/ReadMe.md ================================================ - Run the following command ```bash $ docker run -d \ --name my_postgres \ -v /tmp/data:/var/lib/postgresql/data \ -p 54320:5432 \ -e POSTGRES_PASSWORD=my_password postgres 7d93d3b28d3447f5bd4ed149a7084a7d46872b6efc7d2fc4720d25381dae9169 ``` - Check the status of the container ```bash $ docker ps -a | egrep -v "k8s" CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 7d93d3b28d34 postgres "docker-entrypoint.s…" 12 seconds ago Up 11 seconds 0.0.0.0:54320->5432/tcp, :::54320->5432/tcp my_postgres ``` - Try connecting to the database ```bash $ psql -h 127.0.0.1 -p 54320 --username=postgres Password for user postgres: psql (13.2, server 13.3 (Debian 13.3-1.pgdg100+1)) Type "help" for help. 
postgres=# \du
List of roles
Role name | Attributes | Member of
-----------+------------------------------------------------------------+-----------
postgres | Superuser, Create role, Create DB, Replication, Bypass RLS | {}
```

================================================
FILE: home/containers/docker/taskset_docker_containers/task_013_prometheus_blackbox_exporter/ReadMe.md
================================================
## Black box exporter POC

[Docs](https://prometheus.io/docs/guides/multi-target-exporter/)

- Starting the blackbox exporter

```bash
$ docker run -p 9115:9115 prom/blackbox-exporter
level=info ts=2020-07-27T17:37:24.916Z caller=main.go:212 msg="Starting blackbox_exporter" version="(version=0.17.0, branch=HEAD, revision=1bc768014cf6815f7e9d694e0292e77dd10f3235)"
level=info ts=2020-07-27T17:37:24.916Z caller=main.go:213 msg="Build context" (gogo1.14.4,userroot@626fb3899f41,date20200619-11:54:41)=(MISSING)
level=info ts=2020-07-27T17:37:24.918Z caller=main.go:225 msg="Loaded config file"
level=info ts=2020-07-27T17:37:24.919Z caller=main.go:369 msg="Listening on address" address=:9115
```

- Querying the exporter itself

```bash
$ curl 'localhost:9115/metrics'
# HELP blackbox_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which blackbox_exporter was built.
.
promhttp_metric_handler_requests_total{code="503"} 0
```

- To probe targets such as prometheus.io, restart the exporter with the blackbox.yml config file mounted:

```bash
$ docker \
> run -p 9115:9115 \
> --mount type=bind,source="$(pwd)"/blackbox.yml,target=/blackbox.yml,readonly \
> prom/blackbox-exporter \
> --config.file="/blackbox.yml"
level=info ts=2020-07-27T18:06:49.187Z caller=main.go:212 msg="Starting blackbox_exporter" version="(version=0.17.0, branch=HEAD, revision=1bc768014cf6815f7e9d694e0292e77dd10f3235)"
level=info ts=2020-07-27T18:06:49.187Z caller=main.go:213 msg="Build context" (gogo1.14.4,userroot@626fb3899f41,date20200619-11:54:41)=(MISSING)
level=info ts=2020-07-27T18:06:49.190Z caller=main.go:225 msg="Loaded config file"
level=info ts=2020-07-27T18:06:49.190Z caller=main.go:369 msg="Listening on address" address=:9115
```

With this command, you told docker to:

- run a container with port 9115 outside the container mapped to port 9115 inside the container.
- mount the file blackbox.yml from your current directory ($(pwd) stands for print working directory) into /blackbox.yml in read-only mode.
- use the image prom/blackbox-exporter from Docker Hub.
- run the blackbox exporter with the flag --config.file telling it to use /blackbox.yml as its config file.

Now you can try our new IPv4-using module http_2xx in a terminal:

```bash
$ curl 'localhost:9115/probe?target=prometheus.io&module=http_2xx'
# HELP probe_dns_lookup_time_seconds Returns the time taken for probe dns lookup in seconds
.
probe_success 1
# HELP probe_tls_version_info Contains the TLS version used
# TYPE probe_tls_version_info gauge
probe_tls_version_info{version="TLS 1.3"} 1
```

Run Prometheus (on macOS):

```bash
$ docker \
> run -p 9090:9090 \
> --mount type=bind,source="$(pwd)"/prometheus.yml,target=/prometheus.yml,readonly \
> prom/prometheus \
> --config.file="/prometheus.yml"
level=info ts=2020-07-27T18:23:09.768Z caller=main.go:302 msg="No time or size retention was set so using the default time retention" duration=15d
.
level=info ts=2020-07-27T18:23:09.791Z caller=main.go:646 msg="Server is ready to receive web requests."
```

If everything works fine, you can check the targets at [localhost:9090/targets](localhost:9090/targets)

![img](./.images/localhost-targets-prometheus.png)

================================================
FILE: home/containers/docker/taskset_docker_containers/task_013_prometheus_blackbox_exporter/blackbox.yml
================================================
modules:
  http_2xx:
    prober: http
    http:
      preferred_ip_protocol: "ip4"
  http_post_2xx:
    prober: http
    http:
      method: POST
  tcp_connect:
    prober: tcp
  pop3s_banner:
    prober: tcp
    tcp:
      query_response:
      - expect: "^+OK"
      tls: true
      tls_config:
        insecure_skip_verify: false
  ssh_banner:
    prober: tcp
    tcp:
      query_response:
      - expect: "^SSH-2.0-"
  irc_banner:
    prober: tcp
    tcp:
      query_response:
      - send: "NICK prober"
      - send: "USER prober prober prober :prober"
      - expect: "PING :([^ ]+)"
        send: "PONG ${1}"
      - expect: "^:[^ ]+ 001"
  icmp:
    prober: icmp

================================================
FILE: home/containers/docker/taskset_docker_containers/task_013_prometheus_blackbox_exporter/prometheus.yml
================================================
global:
  scrape_interval: 5s
scrape_configs:
- job_name: blackbox # To get metrics about the exporter itself
  metrics_path: /metrics
  static_configs:
  - targets:
    - host.docker.internal:9115 # Already the Windows/macOS form; on Linux use localhost:9115
- job_name: blackbox-http # To get metrics about the exporter's targets
  metrics_path: /probe
  params:
    module: [http_2xx]
  static_configs:
  - targets:
    - http://prometheus.io # Target to probe with http
    - https://prometheus.io # Target to probe with https
    - http://example.com:8080 # Target to probe with http on port 8080
  relabel_configs:
  - source_labels: [__address__]
    target_label: __param_target
  - source_labels: [__param_target]
    target_label: instance
  - target_label: __address__
    replacement: host.docker.internal:9115 # The blackbox exporter's real hostname:port (already the Windows/macOS form)
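  # (Added note: the three relabel rules above are what turn a normal scrape
  #  into a probe: each website address is copied into the ?target= URL
  #  parameter, preserved as the "instance" label, and the address actually
  #  scraped is rewritten to the blackbox exporter itself.)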
================================================
FILE: home/containers/docker/taskset_docker_containers/task_014_elastic_search/ReadMe.md
================================================
To start Elasticsearch in Docker on a single node

[Docs](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html)

- Start the Elasticsearch container

```bash
$ docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.8.1
```

- Check the health

```bash
$ curl -X GET "localhost:9200/_cat/nodes?v&pretty"
ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
172.17.0.3 49 96 6 0.56 0.38 0.40 dilmrt * 7fc9a4e5361c
```

================================================
FILE: home/containers/docker/taskset_docker_containers/task_015_elastic_search_bkp_restore/Dockerfile
================================================
FROM docker.elastic.co/elasticsearch/elasticsearch:7.7.0

# These arguments are passed in at build time
ARG ENV_VAR_AWS_ACCESS_KEY_ID
ARG ENV_VAR_AWS_SECRET_ACCESS_KEY

ENV AWS_ACCESS_KEY_ID ${ENV_VAR_AWS_ACCESS_KEY_ID}
ENV AWS_SECRET_ACCESS_KEY ${ENV_VAR_AWS_SECRET_ACCESS_KEY}
ENV xpack.security.enabled 'false'
ENV xpack.monitoring.enabled 'false'
ENV xpack.graph.enabled 'false'
ENV xpack.watcher.enabled 'false'
ENV discovery.type 'single-node'
ENV bootstrap.memory_lock 'true'
ENV indices.memory.index_buffer_size '30%'

RUN /usr/share/elasticsearch/bin/elasticsearch-plugin install --batch repository-s3
RUN /usr/share/elasticsearch/bin/elasticsearch-keystore create
RUN echo $AWS_ACCESS_KEY_ID | /usr/share/elasticsearch/bin/elasticsearch-keystore add --stdin s3.client.default.access_key
RUN echo $AWS_SECRET_ACCESS_KEY | /usr/share/elasticsearch/bin/elasticsearch-keystore add --stdin s3.client.default.secret_key

================================================
FILE: home/containers/docker/taskset_docker_containers/task_015_elastic_search_bkp_restore/ReadMe.md
================================================
- Docs Referred
  - [elasticsearch-backup-snapshot-and-restore-on-aws-s3](https://medium.com/@federicopanini/elasticsearch-backup-snapshot-and-restore-on-aws-s3-f1fc32fbca7f)
  - [opendistro](https://opendistro.github.io/for-elasticsearch-docs/docs/elasticsearch/snapshot-restore/#amazon-s3)

### Build image and deploy

- Build the new image

```bash
docker build \
--build-arg ENV_VAR_AWS_ACCESS_KEY_ID= \
--build-arg ENV_VAR_AWS_SECRET_ACCESS_KEY= \
--tag=codeaprendiz/elasticsearch .
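# (Note, not in the original: values passed via --build-arg are recorded in
#  the image metadata (visible with docker history), so this approach is only
#  suitable for a local PoC, not for images pushed to a shared registry.)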
``` - Check the images present ```bash $ docker images | grep "codeaprendiz/elasticsearch" codeaprendiz/elasticsearch latest f06a06d5fd8a 36 seconds ago 796MB ``` - Running the image ```bash docker run -p 9200:9200 -p 9600:9600 codeaprendiz/elasticsearch ``` ### Register - Register your repo at S3 ```bash $ curl -X PUT -H "Content-Type: application/json" -d @register.json "http://localhost:9200/_snapshot/my-s3-repository" {"acknowledged":true} ``` ### Taking Snapshots - Taking a snapshot ```bash $ curl -X PUT -H "Content-Type: application/json" -d @snapshotsetting.json "http://localhost:9200/_snapshot/my-s3-repository/firstsnap?wait_for_completion=true" {"snapshot":{"snapshot":"firstsnap","uuid":"VpRaTS-eRr6TLqIOi9Zw2w","version_id":7060299,"version":"7.6.2","indices":[],"include_global_state":false,"state":"SUCCESS","start_time":"2020-05-16T14:13:06.219Z","start_time_in_millis":1589638386219,"end_time":"2020-05-16T14:13:06.624Z","end_time_in_millis":1589638386624,"duration_in_millis":405,"failures":[],"shards":{"total":0,"failed":0,"successful":0}}} ``` - Corresponding docker logs ```bash $ docker logs -f friendly_fermi {"type": "server", "timestamp": "2020-05-16T14:13:06,646Z", "level": "INFO", "component": "o.e.s.SnapshotsService", "cluster.name": "docker-cluster", "node.name": "676c35dac6af", "message": "snapshot [my-s3-repository:firstsnap/VpRaTS-eRr6TLqIOi9Zw2w] started", "cluster.uuid": "Crq-wvoIQmuzm920sZr8MA", "node.id": "Q_xnc6qyRxy-BbvRLQNwlg" } {"type": "server", "timestamp": "2020-05-16T14:13:08,852Z", "level": "INFO", "component": "o.e.s.SnapshotsService", "cluster.name": "docker-cluster", "node.name": "676c35dac6af", "message": "snapshot [my-s3-repository:firstsnap/VpRaTS-eRr6TLqIOi9Zw2w] completed with state [SUCCESS]", "cluster.uuid": "Crq-wvoIQmuzm920sZr8MA", "node.id": "Q_xnc6qyRxy-BbvRLQNwlg" } ``` - Request your snapshot ```bash $ curl -X GET "http://localhost:9200/_snapshot/my-s3-repository/firstsnap" {"snapshots":[{"snapshot":"firstsnap","uuid":"VpRaTS-eRr6TLqIOi9Zw2w","version_id":7060299,"version":"7.6.2","indices":[],"include_global_state":false,"state":"SUCCESS","start_time":"2020-05-16T14:13:06.219Z","start_time_in_millis":1589638386219,"end_time":"2020-05-16T14:13:06.624Z","end_time_in_millis":1589638386624,"duration_in_millis":405,"failures":[],"shards":{"total":0,"failed":0,"successful":0}}]} ``` - To check the status of snapshot ```bash $ curl -X GET "http://localhost:9200/_snapshot/_status" {"snapshots":[]} ``` - To check all the snapshots ```bash $ curl -X GET "http://localhost:9200/_snapshot/_all" {"my-s3-repository":{"type":"s3","settings":{"bucket":"elk-backup-codeaprendiz"}}} ``` - To see all the snapshots in a repository ```bash $ curl -X GET "http://localhost:9200/_snapshot/my-s3-repository/_all" {"snapshots":[{"snapshot":"firstsnap","uuid":"VwFvTv3nSKOD5K8J3EBE2A","version_id":7060299,"version":"7.6.2","indices":[],"include_global_state":false,"state":"SUCCESS","start_time":"2020-05-14T14:45:46.358Z","start_time_in_millis":1589467546358,"end_time":"2020-05-14T14:45:46.561Z","end_time_in_millis":1589467546561,"duration_in_millis":203,"failures":[],"shards":{"total":0,"failed":0,"successful":0}}]} ``` ### Restore your snapshot - To restore a snapshot ```bash $ curl -X POST -H "Content-Type: application/json" -d @restoresnapshot.json "http://localhost:9200/_snapshot/my-s3-repository/firstsnap/_restore" {"snapshot":{"snapshot":"firstsnap","indices":[],"shards":{"total":0,"failed":0,"successful":0}}} ``` ================================================ 
FILE: home/containers/docker/taskset_docker_containers/task_015_elastic_search_bkp_restore/register.json
================================================
{
  "type": "s3",
  "settings": {
    "bucket": "elk-backup-codeaprendiz"
  }
}

================================================
FILE: home/containers/docker/taskset_docker_containers/task_015_elastic_search_bkp_restore/restoresnapshot.json
================================================
{
  "indices": "kibana*,my-index*",
  "ignore_unavailable": true,
  "include_global_state": false,
  "include_aliases": false,
  "partial": false,
  "rename_pattern": "kibana(.+)",
  "rename_replacement": "restored-kibana$1",
  "index_settings": {
    "index.blocks.read_only": false
  },
  "ignore_index_settings": [
    "index.refresh_interval"
  ]
}

================================================
FILE: home/containers/docker/taskset_docker_containers/task_015_elastic_search_bkp_restore/snapshotsetting.json
================================================
{
  "indices": "kibana*,my-index*,-my-index-2016",
  "ignore_unavailable": true,
  "include_global_state": false,
  "partial": false
}

================================================
FILE: home/containers/docker/taskset_docker_containers/task_016_elastic_search_backup_restore_sample_data/Dockerfile
================================================
FROM docker.elastic.co/elasticsearch/elasticsearch:7.7.0

# These arguments are passed in at build time
ARG ENV_VAR_AWS_ACCESS_KEY_ID
ARG ENV_VAR_AWS_SECRET_ACCESS_KEY

ENV AWS_ACCESS_KEY_ID ${ENV_VAR_AWS_ACCESS_KEY_ID}
ENV AWS_SECRET_ACCESS_KEY ${ENV_VAR_AWS_SECRET_ACCESS_KEY}
ENV xpack.security.enabled 'false'
ENV xpack.monitoring.enabled 'false'
ENV xpack.graph.enabled 'false'
ENV xpack.watcher.enabled 'false'
ENV discovery.type 'single-node'
ENV bootstrap.memory_lock 'true'
ENV indices.memory.index_buffer_size '30%'

RUN /usr/share/elasticsearch/bin/elasticsearch-plugin install --batch repository-s3
RUN /usr/share/elasticsearch/bin/elasticsearch-keystore create
RUN echo $AWS_ACCESS_KEY_ID | /usr/share/elasticsearch/bin/elasticsearch-keystore add --stdin s3.client.default.access_key
RUN echo $AWS_SECRET_ACCESS_KEY | /usr/share/elasticsearch/bin/elasticsearch-keystore add --stdin s3.client.default.secret_key

================================================
FILE: home/containers/docker/taskset_docker_containers/task_016_elastic_search_backup_restore_sample_data/ReadMe.md
================================================
- Docs Referred
  - [elasticsearch-backup-snapshot-and-restore-on-aws-s3](https://medium.com/@federicopanini/elasticsearch-backup-snapshot-and-restore-on-aws-s3-f1fc32fbca7f)
  - [opendistro](https://opendistro.github.io/for-elasticsearch-docs/docs/elasticsearch/snapshot-restore/#amazon-s3)
  - [elasticsearch](https://www.elastic.co/guide/en/kibana/7.7/tutorial-build-dashboard.html#load-dataset)

### Build image and deploy

- Build the new image

```bash
docker build \
--build-arg ENV_VAR_AWS_ACCESS_KEY_ID= \
--build-arg ENV_VAR_AWS_SECRET_ACCESS_KEY= \
--tag=codeaprendiz/elasticsearch .
```

- Running the image

```bash
docker run -p 9200:9200 -p 9600:9600 codeaprendiz/elasticsearch
```

### Register

- Register your repo at S3

```bash
$ curl -X PUT -H "Content-Type: application/json" -d @register.json "http://localhost:9200/_snapshot/my-s3-repository"
{"acknowledged":true}
```

### Download data

```bash
curl -O https://download.elastic.co/demos/kibana/gettingstarted/8.x/shakespeare.json
curl -O https://download.elastic.co/demos/kibana/gettingstarted/8.x/accounts.zip
curl -O https://download.elastic.co/demos/kibana/gettingstarted/8.x/logs.jsonl.gz
# Extract the archives so accounts.json and logs.jsonl are available for the load step
unzip accounts.zip
gunzip logs.jsonl.gz
```

### Set Up Mapping

- Set up the shakespeare mapping

```bash
curl -X PUT "localhost:9200/shakespeare?pretty" -H 'Content-Type: application/json' -d'
{
  "mappings": {
    "properties": {
      "speaker": {"type": "keyword"},
      "play_name": {"type": "keyword"},
      "line_id": {"type": "integer"},
      "speech_number": {"type": "integer"}
    }
  }
}
'
```

- Set up the mapping for logs index 1

```bash
curl -X PUT "localhost:9200/logstash-2015.05.18?pretty" -H 'Content-Type: application/json' -d'
{
  "mappings": {
    "properties": {
      "geo": {
        "properties": {
          "coordinates": {
            "type": "geo_point"
          }
        }
      }
    }
  }
}
'
```

- Set up the mapping for logs index 2

```bash
curl -X PUT "localhost:9200/logstash-2015.05.19?pretty" -H 'Content-Type: application/json' -d'
{
  "mappings": {
    "properties": {
      "geo": {
        "properties": {
          "coordinates": {
            "type": "geo_point"
          }
        }
      }
    }
  }
}
'
```

- Set up the mapping for logs index 3

```bash
curl -X PUT "localhost:9200/logstash-2015.05.20?pretty" -H 'Content-Type: application/json' -d'
{
  "mappings": {
    "properties": {
      "geo": {
        "properties": {
          "coordinates": {
            "type": "geo_point"
          }
        }
      }
    }
  }
}
'
```

### Load the dataset

- accounts.json (extracted from accounts.zip above)

```bash
$ ls accounts.json
accounts.json
curl -u elastic -H 'Content-Type: application/x-ndjson' -XPOST 'http://localhost:9200/bank/_bulk?pretty' --data-binary @accounts.json
Enter host password for user 'elastic': changeit
```

- shakespeare.json

```bash
$ ls shakespeare.json
shakespeare.json
$ curl -u elastic -H 'Content-Type: application/x-ndjson' -XPOST 'http://localhost:9200/shakespeare/_bulk?pretty' --data-binary @shakespeare.json
Enter host password for user 'elastic': changeit
```

- logs.jsonl (extracted from logs.jsonl.gz above)

```bash
$ ls logs.jsonl
logs.jsonl
$ curl -u elastic -H 'Content-Type: application/x-ndjson' -XPOST 'http://localhost:9200/_bulk?pretty' --data-binary @logs.jsonl
Enter host password for user 'elastic': changeit
```

- Verify successful loading

```bash
$ curl -X GET "localhost:9200/_cat/indices?v&pretty"
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
yellow open bank D3IZamRJTSaMiQalyytz9Q 1 1 1000 0 381.8kb 381.8kb
yellow open shakespeare eRGAw6pgRnyScN5kIH6ZzQ 1 1 0 0 5.9mb 5.9mb
yellow open logstash-2015.05.20 qjAB6tF1Q2azhnE6e_NoxQ 1 1 0 0 5.7mb 5.7mb
yellow open logstash-2015.05.18 sg3cCzaVQyunHmwDpWK7gQ 1 1 0 0 5.5mb 5.5mb
yellow open logstash-2015.05.19 BOvYmnU6QB-Wp0ITC0wN1g 1 1 0 0 5.6mb 5.6mb
```

- Check after 15 minutes (CHECK THE STATE HERE.
LATER WE WILL RESTORE THE DATA FROM S3 and COMPARE ITS STATE) ```bash $ curl -X GET "localhost:9200/_cat/indices?v&pretty" health status index uuid pri rep docs.count docs.deleted store.size pri.store.size yellow open bank D3IZamRJTSaMiQalyytz9Q 1 1 1000 0 381.9kb 381.9kb yellow open shakespeare eRGAw6pgRnyScN5kIH6ZzQ 1 1 111396 0 18.2mb 18.2mb yellow open logstash-2015.05.20 qjAB6tF1Q2azhnE6e_NoxQ 1 1 4750 0 14mb 14mb yellow open logstash-2015.05.18 sg3cCzaVQyunHmwDpWK7gQ 1 1 4631 0 13.7mb 13.7mb yellow open logstash-2015.05.19 BOvYmnU6QB-Wp0ITC0wN1g 1 1 4624 0 13.8mb 13.8mb ``` ### Index Mapping before restore ```bash $ curl -X GET "http://localhost:9200/bank/_mapping" {"bank":{"mappings":{"properties":{"account_number":{"type":"long"},"address":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"age":{"type":"long"},"balance":{"type":"long"},"city":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"email":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"employer":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"firstname":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"gender":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"lastname":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"state":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}}}}}} ``` ```bash $ curl -X GET "http://localhost:9200/shakespeare/_mapping" {"shakespeare":{"mappings":{"properties":{"line_id":{"type":"integer"},"line_number":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"play_name":{"type":"keyword"},"speaker":{"type":"keyword"},"speech_number":{"type":"integer"},"text_entry":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"type":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}}}}}} ``` ### Taking Snapshots - Taking a snapshot (this may take some time. be patient!) 
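The snapshot call below blocks until completion because of `wait_for_completion=true`. A non-blocking alternative is sketched here (not in the original; assumes `jq` is installed): trigger the snapshot, then poll its state.

```bash
# Trigger the snapshot without blocking
curl -X PUT -H "Content-Type: application/json" -d @snapshotsetting.json \
  "http://localhost:9200/_snapshot/my-s3-repository/finalsnap"
# Poll until the snapshot state reaches SUCCESS
until [ "$(curl -s "http://localhost:9200/_snapshot/my-s3-repository/finalsnap" \
  | jq -r '.snapshots[0].state')" = "SUCCESS" ]; do
  sleep 5
done
```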
```bash curl -X PUT -H "Content-Type: application/json" -d @snapshotsetting.json "http://localhost:9200/_snapshot/my-s3-repository/finalsnap?wait_for_completion=true" {"snapshot":{"snapshot":"finalsnap","uuid":"rWvtq0cBQIqFoUDc8pYFNA","version_id":7070099,"version":"7.7.0","indices":["logstash-2015.05.20","logstash-2015.05.19","shakespeare","logstash-2015.05.18","bank"],"include_global_state":false,"state":"SUCCESS","start_time":"2020-05-17T20:16:14.563Z","start_time_in_millis":1589746574563,"end_time":"2020-05-17T20:17:38.094Z","end_time_in_millis":1589746658094,"duration_in_millis":83531,"failures":[],"shards":{"total":5,"failed":0,"successful":5}}} ``` - To check the status of snapshot ```bash $ curl -X GET "http://localhost:9200/_snapshot/_status" {"snapshots":[{"snapshot":"finalsnap","repository":"my-s3-repository","uuid":"rWvtq0cBQIqFoUDc8pYFNA","state":"STARTED","include_global_state":false,"shards_stats":{"initializing":0,"started":5,"finalizing":0,"done":0,"failed":0,"total":5},"stats":{"incremental":{"file_count":20,"size_in_bytes":63273242},"processed":{"file_count":4,"size_in_bytes":1704},"total":{"file_count":20,"size_in_bytes":63273242},"start_time_in_millis":1589746574563,"time_in_millis":41848},"indices":{"bank":{"shards_stats":{"initializing":0,"started":1,"finalizing":0,"done":0,"failed":0,"total":1},"stats":{"incremental":{"file_count":4,"size_in_bytes":391085},"processed":{"file_count":0,"size_in_bytes":0},"total":{"file_count":4,"size_in_bytes":391085},"start_time_in_millis":1589746576366,"time_in_millis":0},"shards":{"0":{"stage":"STARTED","stats":{"incremental":{"file_count":4,"size_in_bytes":391085},"processed":{"file_count":0,"size_in_bytes":0},"total":{"file_count":4,"size_in_bytes":391085},"start_time_in_millis":1589746576366,"time_in_millis":0},"node":"vp977yjVTHW5915UCPRBYA"}}},"shakespeare":{"shards_stats":{"initializing":0,"started":1,"finalizing":0,"done":0,"failed":0,"total":1},"stats":{"incremental":{"file_count":4,"size_in_bytes":19179417},"processed":{"file_count":1,"size_in_bytes":426},"total":{"file_count":4,"size_in_bytes":19179417},"start_time_in_millis":1589746576366,"time_in_millis":0},"shards":{"0":{"stage":"STARTED","stats":{"incremental":{"file_count":4,"size_in_bytes":19179417},"processed":{"file_count":1,"size_in_bytes":426},"total":{"file_count":4,"size_in_bytes":19179417},"start_time_in_millis":1589746576366,"time_in_millis":0},"node":"vp977yjVTHW5915UCPRBYA"}}},"logstash-2015.05.20":{"shards_stats":{"initializing":0,"started":1,"finalizing":0,"done":0,"failed":0,"total":1},"stats":{"incremental":{"file_count":4,"size_in_bytes":14772605},"processed":{"file_count":1,"size_in_bytes":426},"total":{"file_count":4,"size_in_bytes":14772605},"start_time_in_millis":1589746576366,"time_in_millis":0},"shards":{"0":{"stage":"STARTED","stats":{"incremental":{"file_count":4,"size_in_bytes":14772605},"processed":{"file_count":1,"size_in_bytes":426},"total":{"file_count":4,"size_in_bytes":14772605},"start_time_in_millis":1589746576366,"time_in_millis":0},"node":"vp977yjVTHW5915UCPRBYA"}}},"logstash-2015.05.18":{"shards_stats":{"initializing":0,"started":1,"finalizing":0,"done":0,"failed":0,"total":1},"stats":{"incremental":{"file_count":4,"size_in_bytes":14394696},"processed":{"file_count":1,"size_in_bytes":426},"total":{"file_count":4,"size_in_bytes":14394696},"start_time_in_millis":1589746576366,"time_in_millis":0},"shards":{"0":{"stage":"STARTED","stats":{"incremental":{"file_count":4,"size_in_bytes":14394696},"processed":{"file_count":1,"size_in_bytes":
426},"total":{"file_count":4,"size_in_bytes":14394696},"start_time_in_millis":1589746576366,"time_in_millis":0},"node":"vp977yjVTHW5915UCPRBYA"}}},"logstash-2015.05.19":{"shards_stats":{"initializing":0,"started":1,"finalizing":0,"done":0,"failed":0,"total":1},"stats":{"incremental":{"file_count":4,"size_in_bytes":14535439},"processed":{"file_count":1,"size_in_bytes":426},"total":{"file_count":4,"size_in_bytes":14535439},"start_time_in_millis":1589746576366,"time_in_millis":0},"shards":{"0":{"stage":"STARTED","stats":{"incremental":{"file_count":4,"size_in_bytes":14535439},"processed":{"file_count":1,"size_in_bytes":426},"total":{"file_count":4,"size_in_bytes":14535439},"start_time_in_millis":1589746576366,"time_in_millis":0},"node":"vp977yjVTHW5915UCPRBYA"}}}}}]} $ curl -X GET "http://localhost:9200/_snapshot/_status" {"snapshots":[]} ``` - Request your snapshot ```bash $ curl -X GET "http://localhost:9200/_snapshot/my-s3-repository/finalsnap" curl -X GET "http://localhost:9200/_snapshot/my-s3-repository/finalsnap" {"snapshots":[{"snapshot":"finalsnap","uuid":"rWvtq0cBQIqFoUDc8pYFNA","version_id":7070099,"version":"7.7.0","indices":["logstash-2015.05.20","logstash-2015.05.19","shakespeare","logstash-2015.05.18","bank"],"include_global_state":false,"state":"SUCCESS","start_time":"2020-05-17T20:16:14.563Z","start_time_in_millis":1589746574563,"end_time":"2020-05-17T20:17:38.094Z","end_time_in_millis":1589746658094,"duration_in_millis":83531,"failures":[],"shards":{"total":5,"failed":0,"successful":5}}]} ``` - To check all the snapshots ```bash $ curl -X GET "http://localhost:9200/_snapshot/_all" {"my-s3-repository":{"type":"s3","settings":{"bucket":"elk-backup-codeaprendiz"}}} ``` - To see all the snapshots in a repository ```bash $ curl -X GET "http://localhost:9200/_snapshot/my-s3-repository/_all" {"snapshots":[{"snapshot":"firstsnap","uuid":"VpRaTS-eRr6TLqIOi9Zw2w","version_id":7060299,"version":"7.6.2","indices":[],"include_global_state":false,"state":"SUCCESS","start_time":"2020-05-16T14:13:06.219Z","start_time_in_millis":1589638386219,"end_time":"2020-05-16T14:13:06.624Z","end_time_in_millis":1589638386624,"duration_in_millis":405,"failures":[],"shards":{"total":0,"failed":0,"successful":0}},{"snapshot":"finalsnap","uuid":"rWvtq0cBQIqFoUDc8pYFNA","version_id":7070099,"version":"7.7.0","indices":["logstash-2015.05.20","logstash-2015.05.19","shakespeare","logstash-2015.05.18","bank"],"include_global_state":false,"state":"SUCCESS","start_time":"2020-05-17T20:16:14.563Z","start_time_in_millis":1589746574563,"end_time":"2020-05-17T20:17:38.094Z","end_time_in_millis":1589746658094,"duration_in_millis":83531,"failures":[],"shards":{"total":5,"failed":0,"successful":5}}]} ``` ### Restore your snapshot after creating a new docker container - Kill the previous(ctr+D) docker container and start a new container. You will need to register you S3 bucket again. See the command given at the begining. - After the new container is started and registered. 
### Restore your snapshot after creating a new docker container

- Kill the previous docker container (Ctrl+D) and start a new one. You will need to register your S3 bucket again; see the command given at the beginning.
- After the new container is started and the repository is registered, check the state before the restore:

```bash
$ curl -X GET "localhost:9200/_cat/indices?v&pretty"
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
```

- To restore a snapshot

```bash
$ curl -X POST -H "Content-Type: application/json" -d @restoresnapshot.json "http://localhost:9200/_snapshot/my-s3-repository/finalsnap/_restore"
{"accepted":true}
```

- Now check the state after the restore and compare it with the previous state

```bash
$ curl -X GET "localhost:9200/_cat/indices?v&pretty"
health status index               uuid                   pri rep docs.count docs.deleted store.size pri.store.size
yellow open   bank                smFJpJJ-S9KiGyw8Ysc5Vw   1   1       1000            0    381.9kb        381.9kb
yellow open   shakespeare         cs8TZUIPStyjL5_92lcPqw   1   1     111396            0     18.2mb         18.2mb
yellow open   logstash-2015.05.20 lOdp-I-wRt-DTK3GxDHbuw   1   1       4750            0       14mb           14mb
yellow open   logstash-2015.05.18 i8P-GUM_S_CYMwBh-nO4pQ   1   1       4631            0     13.7mb         13.7mb
yellow open   logstash-2015.05.19 XStCeqfgRSSxbNEo9Gdy9w   1   1       4624            0     13.8mb         13.8mb
```

### Index Mapping after restore

```bash
$ curl -X GET "http://localhost:9200/bank/_mapping"
{"bank":{"mappings":{"properties":{"account_number":{"type":"long"},"address":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"age":{"type":"long"},"balance":{"type":"long"},"city":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"email":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"employer":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"firstname":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"gender":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"lastname":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"state":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}}}}}}
```

```bash
$ curl -X GET "http://localhost:9200/shakespeare/_mapping"
{"shakespeare":{"mappings":{"properties":{"line_id":{"type":"integer"},"line_number":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"play_name":{"type":"keyword"},"speaker":{"type":"keyword"},"speech_number":{"type":"integer"},"text_entry":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}},"type":{"type":"text","fields":{"keyword":{"type":"keyword","ignore_above":256}}}}}}}
```
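- As a further check, the restored document counts can be compared against the `docs.count` column from `_cat/indices` above; a minimal sketch using the standard `_count` API:

```bash
# Expect 1000 for bank and 111396 for shakespeare, matching docs.count above
$ curl -X GET "http://localhost:9200/bank/_count"
$ curl -X GET "http://localhost:9200/shakespeare/_count"
```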
================================================
FILE: home/containers/docker/taskset_docker_containers/task_016_elastic_search_backup_restore_sample_data/createdata.json
================================================
{
  "mappings": {
    "employees": {
      "properties": {
        "FirstName": { "type": "text" },
        "LastName": { "type": "text" },
        "Designation": { "type": "text" },
        "Salary": { "type": "integer" },
        "DateOfJoining": { "type": "date", "format": "yyyy-MM-dd" },
        "Address": { "type": "text" },
        "Gender": { "type": "text" },
        "Age": { "type": "integer" },
        "MaritalStatus": { "type": "text" },
        "Interests": { "type": "text" }
      }
    }
  }
}

================================================
FILE: home/containers/docker/taskset_docker_containers/task_016_elastic_search_backup_restore_sample_data/register.json
================================================
{
  "type": "s3",
  "settings": {
    "bucket": "elk-backup-codeaprendiz"
  }
}

================================================
FILE: home/containers/docker/taskset_docker_containers/task_016_elastic_search_backup_restore_sample_data/restoresnapshot.json
================================================
{
  "indices": "bank*,shakespeare*,logstash*",
  "ignore_unavailable": true,
  "include_global_state": false,
  "include_aliases": false,
  "partial": false,
  "rename_pattern": "kibana(.+)",
  "rename_replacement": "restored-kibana$1",
  "index_settings": {
    "index.blocks.read_only": false
  },
  "ignore_index_settings": [
    "index.refresh_interval"
  ]
}

================================================
FILE: home/containers/docker/taskset_docker_containers/task_016_elastic_search_backup_restore_sample_data/snapshotsetting.json
================================================
{
  "indices": "bank*,shakespeare*,logstash*",
  "ignore_unavailable": true,
  "include_global_state": false,
  "partial": false
}

================================================
FILE: home/containers/docker/taskset_docker_containers/task_017_mem_and_cpu_limit_container/ReadMe.md
================================================
### To set the upper limit on memory to 512 megabytes for the nginx container

```bash
$ docker run -m 512m nginx
$ docker ps | egrep -v "k8s"
CONTAINER ID   IMAGE   COMMAND                  CREATED              STATUS              PORTS    NAMES
6959c8b602ba   nginx   "/docker-entrypoint.…"   About a minute ago   Up About a minute   80/tcp   intelligent_bartik
$ docker stats intelligent_bartik
CONTAINER ID   NAME                 CPU %   MEM USAGE / LIMIT   MEM %   NET I/O       BLOCK I/O         PIDS
6959c8b602ba   intelligent_bartik   0.00%   4.316MiB / 512MiB   0.84%   1.17kB / 0B   7.42MB / 8.19kB   2
```

### To set the upper limit on the number of CPUs used by the nginx container

By default, access to the computing power of the host machine is unlimited. We can set a CPU limit using the `--cpus` flag. For example, let's constrain our container to use at most two CPUs:

```bash
$ docker run --cpus=2 nginx
```
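Both limits can be verified from the host; a minimal sketch using `docker inspect` format templates (`intelligent_bartik` is the auto-generated container name from above; the second command assumes a container started with `--cpus=2`):

```bash
# HostConfig.Memory is reported in bytes: 512m -> 536870912
$ docker inspect -f '{{ .HostConfig.Memory }}' intelligent_bartik
536870912
# --cpus is stored as NanoCpus: 2 CPUs -> 2000000000
$ docker inspect -f '{{ .HostConfig.NanoCpus }}' <cpu-limited-container>
2000000000
```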
================================================
FILE: home/containers/docker/taskset_docker_containers/task_018_mysql/ReadMe.md
================================================
[Link](https://medium.com/@crmcmullen/how-to-run-mysql-in-a-docker-container-on-macos-with-persistent-local-data-58b89aec496a)

- To stop the mysql process on macOS

```bash
$ sudo launchctl unload -w /Library/LaunchDaemons/com.oracle.oss.mysql.mysqld.plist
/Library/LaunchDaemons/com.oracle.oss.mysql.mysqld.plist: Operation now in progress
```

- Create the persistent directories

```bash
$ mkdir /Users/[your_username]/Develop
$ mkdir /Users/[your_username]/Develop/mysql_data
$ mkdir /Users/[your_username]/Develop/mysql_data/8.0
```

- Create a docker network

```bash
$ docker network create dev-network
```

- Start the docker container locally

```bash
$ docker run --restart always --name mysql8.0 --net dev-network -v /Users/[your_username]/Develop/mysql_data/8.0:/var/lib/mysql -p 3306:3306 -d -e MYSQL_ROOT_PASSWORD=password mysql:8.0
```

- Connect to mysql

```bash
$ mysql -h127.0.0.1 -uroot -ppassword
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 10
Server version: 8.0.23 MySQL Community Server - GPL

Copyright (c) 2000, 2021, Oracle and/or its affiliates.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql>
```

- Connect to mysql using python

```bash
$ pip3 install mysql-connector-python
```

```python
import mysql.connector

mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    password="password"
)

mycursor = mydb.cursor()
mycursor.execute("CREATE DATABASE mydatabase")
mycursor.execute("show databases")
for x in mycursor:
    print(x)
...
('information_schema',)
('mydatabase',)
('mysql',)
('performance_schema',)
('sys',)
```

================================================
FILE: home/containers/docker/taskset_docker_containers/task_019_nginx_https_domain_test/ReadMe.md
================================================
To test that the certificates are working

- nslookup the domain

```bash
$ nslookup server.domain.com
Server:         127.0.0.53
Address:        127.0.0.53#53

Non-authoritative answer:
Name:   server.domain.com
Address: 23.12.43.56
```

- Log in to the server 23.12.43.56. Assuming the certificates and key are valid for `*.domain.com`

```bash
$ ls -ltrh
total 20K
-rwxrwxrwx 1 server server 3.2K Aug 20 16:10 star_domain.com.key
-rwxrwxrwx 1 server server 8.5K Aug 20 16:10 star_domain_com.chained.crt
-rwxrwxrwx 1 server server  335 Aug 20 16:45 nginx.conf
```

- Contents of nginx.conf

```bash
$ cat nginx.conf
server {
    listen 443 ssl;
    server_name prod.domain.com;
    ssl_certificate /etc/nginx/certs/star_domain_com.chained.crt;
    ssl_certificate_key /etc/nginx/certs/star_domain.com.key;
    location / {
        allow all;
        root /usr/share/nginx/html;
    }
    root /usr/share/nginx/html;
    index index.html;
}
```

- Start the docker container

```bash
$ docker run --rm -p 80:80 -p 443:443 \
    --name nginx_service \
    -v $PWD/star_domain.com.key:/etc/nginx/certs/star_domain.com.key \
    -v $PWD/star_domain_com.chained.crt:/etc/nginx/certs/star_domain_com.chained.crt \
    -v $PWD/nginx.conf:/etc/nginx/conf.d/default.conf \
    nginx:latest
```

- Visit `https://server.domain.com/` in a browser and validate that it loads
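- The certificate can also be checked from the command line before trying a browser; a minimal sketch, assuming `openssl` is available on the client machine:

```bash
# Request the cert via SNI and print its subject, issuer, and validity window
$ openssl s_client -connect server.domain.com:443 -servername server.domain.com </dev/null 2>/dev/null \
    | openssl x509 -noout -subject -issuer -dates
# Or let curl validate the full chain end to end
$ curl -vI https://server.domain.com/
```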
================================================
FILE: home/containers/docker/taskset_docker_containers/task_020_docker_commit/ReadMe.md
================================================
# docker commit

It can be useful to commit a container's file changes or settings into a new image. This allows you to debug a container by running an interactive shell, or to export a working dataset to another server.

[docs.docker.com/engine/reference/commandline/commit](https://docs.docker.com/engine/reference/commandline/commit)

**High Level Objectives**

- start an ubuntu container with bash and make some changes
- commit the container state to an image
- start a new container from the new image and validate our changes
- add an env var using `--change` and repeat the validation

**Skills**

- docker
- docker commit
- docker images
- docker inspect
- docker commit --change

**Version Stack**

| Stack  | Version  |
|--------|----------|
| docker | 20.10.14 |

# run ubuntu container

```bash
# Terminal session t1
❯ docker run -it --rm --name ubuntu ubuntu bash
root@4d74a15a73eb:/# cd /home
root@4d74a15a73eb:/home# ls
root@4d74a15a73eb:/home# echo "I am new file" > file.txt
root@4d74a15a73eb:/home# ls
file.txt

## New Terminal session t2
❯ docker ps -a
CONTAINER ID   IMAGE    COMMAND   CREATED          STATUS          PORTS   NAMES
4d74a15a73eb   ubuntu   "bash"    45 seconds ago   Up 44 seconds           ubuntu
❯ docker commit 4d74a15a73eb my-ubuntu:v1
sha256:9fec4fd33de4966790bb4b2920abeefc4d33513c8c2bbb3641d6609caab2086f
❯ docker images | head -n 2
REPOSITORY   TAG   IMAGE ID       CREATED          SIZE
my-ubuntu    v1    9fec4fd33de4   12 seconds ago   69.2MB

# terminal session t1
root@4d74a15a73eb:/home# exit
exit
❯ docker ps -a
CONTAINER ID   IMAGE   COMMAND   CREATED   STATUS   PORTS   NAMES
❯ docker run -it --rm --name ubuntu my-ubuntu:v1 bash
root@8d61e1d9a312:/# cd /home
root@8d61e1d9a312:/home# ls
file.txt

# terminal session t2
❯ docker ps -a
CONTAINER ID   IMAGE          COMMAND   CREATED         STATUS         PORTS   NAMES
8d61e1d9a312   my-ubuntu:v1   "bash"    6 minutes ago   Up 6 minutes           ubuntu
❯ docker inspect -f "{{ .Config.Env }}" 8d61e1d9a312
[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin]
❯ docker commit --change "ENV DEBUG=true" 8d61e1d9a312 my-ubuntu:v2
sha256:e7eb352de7b38ae12046552925d2ff9ef61d1dc14894a709dd28c499dda50243
❯ docker images | head -n 2
REPOSITORY   TAG   IMAGE ID       CREATED              SIZE
my-ubuntu    v2    e7eb352de7b3   About a minute ago   214MB

# Terminal session t1
root@8d61e1d9a312:/home# exit
exit
❯ docker ps -a
CONTAINER ID   IMAGE   COMMAND   CREATED   STATUS   PORTS   NAMES
❯ docker run -it --rm --name ubuntu my-ubuntu:v2 bash
root@072729702c8b:/# env | grep DEBUG
DEBUG=true

# Terminal session t2
❯ docker ps -a
CONTAINER ID   IMAGE          COMMAND   CREATED         STATUS         PORTS   NAMES
072729702c8b   my-ubuntu:v2   "bash"    2 minutes ago   Up 2 minutes           ubuntu
❯ docker inspect -f "{{ .Config.Env }}" 072729702c8b
[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true]
```
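`--change` accepts other Dockerfile instructions besides `ENV` (for example `CMD`, `ENTRYPOINT`, `EXPOSE`). A minimal sketch that bakes a default command into the committed image (the `my-ubuntu:v3` tag is hypothetical, not part of the task above):

```bash
# Commit with a CMD so the image starts bash without an explicit command
❯ docker commit --change 'CMD ["bash"]' 072729702c8b my-ubuntu:v3
❯ docker run -it --rm my-ubuntu:v3
```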
================================================
FILE: home/containers/docker/taskset_docker_containers/task_021_kibana/ReadMe.md
================================================
To start Kibana locally and connect it to the local dockerized elastic-search

- For starting elastic search refer to the [task-005-elastic-search](../task-005-elastic-search)
- Check if elastic-search is running locally (the container name here is `dreamy_borg`)

```bash
$ docker ps | egrep elastic
7fc9a4e5361c   docker.elastic.co/elasticsearch/elasticsearch:7.8.1   "/tini -- /usr/local…"   7 minutes ago   Up 7 minutes   0.0.0.0:9200->9200/tcp, 0.0.0.0:9300->9300/tcp   dreamy_borg
```

- Start the docker-kibana locally

```bash
$ docker run --link dreamy_borg:elasticsearch -p 5601:5601 docker.elastic.co/kibana/kibana:7.8.1
```

- Kibana will not come up if it is unable to connect to elastic search. You will see the following in the logs

```bash
{"type":"log","@timestamp":"2020-07-30T09:37:59Z","tags":["warning","elasticsearch","admin"],"pid":7,"message":"Unable to revive connection: http://elasticsearch:9200/"}
```

- Once Kibana is up you can visit the dashboard at [http://0.0.0.0:5601/app/kibana](http://0.0.0.0:5601/app/kibana). You can verify the elastic search connection by checking the Index Patterns settings page.

![img](./.images/kibana-dashboard.png)

================================================
FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/ReadMe.md
================================================
- [Sample APP](https://docs.docker.com/get-started/02_our_app/)
- Build

```bash
$ docker build -t getting-started .
```

- Start the app container

```bash
$ docker run -dp 3000:3000 getting-started
ec25cae23f5fa1d421c7a750b70d1ba914286eee2e46f464db8fbb8d1f7314ba
```

- Then visit the app on localhost:3000

![](.images/2022-07-24-11-00-29.png)

================================================
FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/Dockerfile
================================================
# syntax=docker/dockerfile:1
FROM node:12-alpine
RUN apk add --no-cache python2 g++ make
WORKDIR /app
COPY . .
RUN yarn install --production
CMD ["node", "src/index.js"]
EXPOSE 3000

================================================
FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/package.json
================================================
{
  "name": "101-app",
  "version": "1.0.0",
  "main": "index.js",
  "license": "MIT",
  "scripts": {
    "prettify": "prettier -l --write \"**/*.js\"",
    "test": "jest",
    "dev": "nodemon src/index.js"
  },
  "dependencies": {
    "express": "^4.17.1",
    "mysql": "^2.17.1",
    "sqlite3": "^5.0.0",
    "uuid": "^3.3.3",
    "wait-port": "^0.2.2"
  },
  "resolutions": {
    "ansi-regex": "5.0.1"
  },
  "prettier": {
    "trailingComma": "all",
    "tabWidth": 4,
    "useTabs": false,
    "semi": true,
    "singleQuote": true
  },
  "devDependencies": {
    "jest": "^27.2.5",
    "nodemon": "^2.0.13",
    "prettier": "^1.18.2"
  }
}

================================================
FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/spec/persistence/sqlite.spec.js
================================================
const db = require('../../src/persistence/sqlite'); const fs = require('fs'); const location = process.env.SQLITE_DB_LOCATION || '/etc/todos/todo.db'; const ITEM = { id: '7aef3d7c-d301-4846-8358-2a91ec9d6be3', name: 'Test', completed: false, }; beforeEach(() => { if (fs.existsSync(location)) { fs.unlinkSync(location); } }); test('it initializes correctly', async () => { await db.init(); }); test('it can store and retrieve items', async () => { await db.init(); await db.storeItem(ITEM); const items = await db.getItems(); expect(items.length).toBe(1); expect(items[0]).toEqual(ITEM); }); test('it can update an existing item', async () => { await db.init(); const initialItems = await db.getItems(); expect(initialItems.length).toBe(0); await db.storeItem(ITEM); await db.updateItem( ITEM.id, Object.assign({}, ITEM, { completed: !ITEM.completed }), ); const items = await db.getItems(); expect(items.length).toBe(1); expect(items[0].completed).toBe(!ITEM.completed); }); test('it can remove an existing item', async () => { await db.init(); await db.storeItem(ITEM); await db.removeItem(ITEM.id); const items = await db.getItems(); expect(items.length).toBe(0); }); test('it can get a single item', async () => { await
db.storeItem(ITEM); const item = await db.getItem(ITEM.id); expect(item).toEqual(ITEM); }); ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/spec/routes/addItem.spec.js ================================================ const db = require('../../src/persistence'); const addItem = require('../../src/routes/addItem'); const ITEM = { id: 12345 }; const uuid = require('uuid/v4'); jest.mock('uuid/v4', () => jest.fn()); jest.mock('../../src/persistence', () => ({ removeItem: jest.fn(), storeItem: jest.fn(), getItem: jest.fn(), })); test('it stores item correctly', async () => { const id = 'something-not-a-uuid'; const name = 'A sample item'; const req = { body: { name } }; const res = { send: jest.fn() }; uuid.mockReturnValue(id); await addItem(req, res); const expectedItem = { id, name, completed: false }; expect(db.storeItem.mock.calls.length).toBe(1); expect(db.storeItem.mock.calls[0][0]).toEqual(expectedItem); expect(res.send.mock.calls[0].length).toBe(1); expect(res.send.mock.calls[0][0]).toEqual(expectedItem); }); ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/spec/routes/deleteItem.spec.js ================================================ const db = require('../../src/persistence'); const deleteItem = require('../../src/routes/deleteItem'); const ITEM = { id: 12345 }; jest.mock('../../src/persistence', () => ({ removeItem: jest.fn(), getItem: jest.fn(), })); test('it removes item correctly', async () => { const req = { params: { id: 12345 } }; const res = { sendStatus: jest.fn() }; await deleteItem(req, res); expect(db.removeItem.mock.calls.length).toBe(1); expect(db.removeItem.mock.calls[0][0]).toBe(req.params.id); expect(res.sendStatus.mock.calls[0].length).toBe(1); expect(res.sendStatus.mock.calls[0][0]).toBe(200); }); ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/spec/routes/getItems.spec.js ================================================ const db = require('../../src/persistence'); const getItems = require('../../src/routes/getItems'); const ITEMS = [{ id: 12345 }]; jest.mock('../../src/persistence', () => ({ getItems: jest.fn(), })); test('it gets items correctly', async () => { const req = {}; const res = { send: jest.fn() }; db.getItems.mockReturnValue(Promise.resolve(ITEMS)); await getItems(req, res); expect(db.getItems.mock.calls.length).toBe(1); expect(res.send.mock.calls[0].length).toBe(1); expect(res.send.mock.calls[0][0]).toEqual(ITEMS); }); ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/spec/routes/updateItem.spec.js ================================================ const db = require('../../src/persistence'); const updateItem = require('../../src/routes/updateItem'); const ITEM = { id: 12345 }; jest.mock('../../src/persistence', () => ({ getItem: jest.fn(), updateItem: jest.fn(), })); test('it updates items correctly', async () => { const req = { params: { id: 1234 }, body: { name: 'New title', completed: false }, }; const res = { send: jest.fn() }; db.getItem.mockReturnValue(Promise.resolve(ITEM)); await updateItem(req, res); expect(db.updateItem.mock.calls.length).toBe(1); expect(db.updateItem.mock.calls[0][0]).toBe(req.params.id); expect(db.updateItem.mock.calls[0][1]).toEqual({ name: 'New title', completed: false, }); 
expect(db.getItem.mock.calls.length).toBe(1); expect(db.getItem.mock.calls[0][0]).toBe(req.params.id); expect(res.send.mock.calls[0].length).toBe(1); expect(res.send.mock.calls[0][0]).toEqual(ITEM); }); ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/index.js ================================================ const express = require('express'); const app = express(); const db = require('./persistence'); const getItems = require('./routes/getItems'); const addItem = require('./routes/addItem'); const updateItem = require('./routes/updateItem'); const deleteItem = require('./routes/deleteItem'); app.use(express.json()); app.use(express.static(__dirname + '/static')); app.get('/items', getItems); app.post('/items', addItem); app.put('/items/:id', updateItem); app.delete('/items/:id', deleteItem); db.init().then(() => { app.listen(3000, () => console.log('Listening on port 3000')); }).catch((err) => { console.error(err); process.exit(1); }); const gracefulShutdown = () => { db.teardown() .catch(() => {}) .then(() => process.exit()); }; process.on('SIGINT', gracefulShutdown); process.on('SIGTERM', gracefulShutdown); process.on('SIGUSR2', gracefulShutdown); // Sent by nodemon ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/persistence/index.js ================================================ if (process.env.MYSQL_HOST) module.exports = require('./mysql'); else module.exports = require('./sqlite'); ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/persistence/mysql.js ================================================ const waitPort = require('wait-port'); const fs = require('fs'); const mysql = require('mysql'); const { MYSQL_HOST: HOST, MYSQL_HOST_FILE: HOST_FILE, MYSQL_USER: USER, MYSQL_USER_FILE: USER_FILE, MYSQL_PASSWORD: PASSWORD, MYSQL_PASSWORD_FILE: PASSWORD_FILE, MYSQL_DB: DB, MYSQL_DB_FILE: DB_FILE, } = process.env; let pool; async function init() { const host = HOST_FILE ? fs.readFileSync(HOST_FILE) : HOST; const user = USER_FILE ? fs.readFileSync(USER_FILE) : USER; const password = PASSWORD_FILE ? fs.readFileSync(PASSWORD_FILE) : PASSWORD; const database = DB_FILE ? 
fs.readFileSync(DB_FILE) : DB; await waitPort({ host, port : 3306}); pool = mysql.createPool({ connectionLimit: 5, host, user, password, database, charset: 'utf8mb4', }); return new Promise((acc, rej) => { pool.query( 'CREATE TABLE IF NOT EXISTS todo_items (id varchar(36), name varchar(255), completed boolean) DEFAULT CHARSET utf8mb4', err => { if (err) return rej(err); console.log(`Connected to mysql db at host ${HOST}`); acc(); }, ); }); } async function teardown() { return new Promise((acc, rej) => { pool.end(err => { if (err) rej(err); else acc(); }); }); } async function getItems() { return new Promise((acc, rej) => { pool.query('SELECT * FROM todo_items', (err, rows) => { if (err) return rej(err); acc( rows.map(item => Object.assign({}, item, { completed: item.completed === 1, }), ), ); }); }); } async function getItem(id) { return new Promise((acc, rej) => { pool.query('SELECT * FROM todo_items WHERE id=?', [id], (err, rows) => { if (err) return rej(err); acc( rows.map(item => Object.assign({}, item, { completed: item.completed === 1, }), )[0], ); }); }); } async function storeItem(item) { return new Promise((acc, rej) => { pool.query( 'INSERT INTO todo_items (id, name, completed) VALUES (?, ?, ?)', [item.id, item.name, item.completed ? 1 : 0], err => { if (err) return rej(err); acc(); }, ); }); } async function updateItem(id, item) { return new Promise((acc, rej) => { pool.query( 'UPDATE todo_items SET name=?, completed=? WHERE id=?', [item.name, item.completed ? 1 : 0, id], err => { if (err) return rej(err); acc(); }, ); }); } async function removeItem(id) { return new Promise((acc, rej) => { pool.query('DELETE FROM todo_items WHERE id = ?', [id], err => { if (err) return rej(err); acc(); }); }); } module.exports = { init, teardown, getItems, getItem, storeItem, updateItem, removeItem, }; ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/persistence/sqlite.js ================================================ const sqlite3 = require('sqlite3').verbose(); const fs = require('fs'); const location = process.env.SQLITE_DB_LOCATION || '/etc/todos/todo.db'; let db, dbAll, dbRun; function init() { const dirName = require('path').dirname(location); if (!fs.existsSync(dirName)) { fs.mkdirSync(dirName, { recursive: true }); } return new Promise((acc, rej) => { db = new sqlite3.Database(location, err => { if (err) return rej(err); if (process.env.NODE_ENV !== 'test') console.log(`Using sqlite database at ${location}`); db.run( 'CREATE TABLE IF NOT EXISTS todo_items (id varchar(36), name varchar(255), completed boolean)', (err, result) => { if (err) return rej(err); acc(); }, ); }); }); } async function teardown() { return new Promise((acc, rej) => { db.close(err => { if (err) rej(err); else acc(); }); }); } async function getItems() { return new Promise((acc, rej) => { db.all('SELECT * FROM todo_items', (err, rows) => { if (err) return rej(err); acc( rows.map(item => Object.assign({}, item, { completed: item.completed === 1, }), ), ); }); }); } async function getItem(id) { return new Promise((acc, rej) => { db.all('SELECT * FROM todo_items WHERE id=?', [id], (err, rows) => { if (err) return rej(err); acc( rows.map(item => Object.assign({}, item, { completed: item.completed === 1, }), )[0], ); }); }); } async function storeItem(item) { return new Promise((acc, rej) => { db.run( 'INSERT INTO todo_items (id, name, completed) VALUES (?, ?, ?)', [item.id, item.name, item.completed ? 
1 : 0], err => { if (err) return rej(err); acc(); }, ); }); } async function updateItem(id, item) { return new Promise((acc, rej) => { db.run( 'UPDATE todo_items SET name=?, completed=? WHERE id = ?', [item.name, item.completed ? 1 : 0, id], err => { if (err) return rej(err); acc(); }, ); }); } async function removeItem(id) { return new Promise((acc, rej) => { db.run('DELETE FROM todo_items WHERE id = ?', [id], err => { if (err) return rej(err); acc(); }); }); } module.exports = { init, teardown, getItems, getItem, storeItem, updateItem, removeItem, }; ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/routes/addItem.js ================================================ const db = require('../persistence'); const uuid = require('uuid/v4'); module.exports = async (req, res) => { const item = { id: uuid(), name: req.body.name, completed: false, }; await db.storeItem(item); res.send(item); }; ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/routes/deleteItem.js ================================================ const db = require('../persistence'); module.exports = async (req, res) => { await db.removeItem(req.params.id); res.sendStatus(200); }; ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/routes/getItems.js ================================================ const db = require('../persistence'); module.exports = async (req, res) => { const items = await db.getItems(); res.send(items); }; ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/routes/updateItem.js ================================================ const db = require('../persistence'); module.exports = async (req, res) => { await db.updateItem(req.params.id, { name: req.body.name, completed: req.body.completed, }); const item = await db.getItem(req.params.id); res.send(item); }; ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/static/css/font-awesome/fa-brands-400.svg#fontawesome ================================================ Created by FontForge 20190801 at Thu Aug 22 14:41:09 2019 By Robert Madole Copyright (c) Font Awesome ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/static/css/font-awesome/fa-regular-400.svg#fontawesome ================================================ Created by FontForge 20190801 at Thu Aug 22 14:41:09 2019 By Robert Madole Copyright (c) Font Awesome ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/static/css/font-awesome/fa-solid-900.svg#fontawesome ================================================ Created by FontForge 20190801 at Thu Aug 22 14:41:09 2019 By Robert Madole Copyright (c) Font Awesome ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/static/css/styles.css ================================================ body { background-color: #f4f4f4; margin-top: 50px; font-family: 'Lato'; } .item { background-color: white; padding: 15px; margin-bottom: 15px; border: transparent; border-radius: 5px; box-shadow: 0 0 1em #ccc; transition: all .2s ease-in-out; } 
.item:hover { box-shadow: 0 0 1em #aaa; } .item.completed { text-decoration: line-through; } .toggles { color: black; } .name { padding-top: 3px; } .remove { padding-left: 0; } button:focus { border: 1px solid #333; } ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/static/index.html ================================================ Todo App
================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/static/js/app.js ================================================ function App() { const { Container, Row, Col } = ReactBootstrap; return ( ); } function TodoListCard() { const [items, setItems] = React.useState(null); React.useEffect(() => { fetch('/items') .then(r => r.json()) .then(setItems); }, []); const onNewItem = React.useCallback( newItem => { setItems([...items, newItem]); }, [items], ); const onItemUpdate = React.useCallback( item => { const index = items.findIndex(i => i.id === item.id); setItems([ ...items.slice(0, index), item, ...items.slice(index + 1), ]); }, [items], ); const onItemRemoval = React.useCallback( item => { const index = items.findIndex(i => i.id === item.id); setItems([...items.slice(0, index), ...items.slice(index + 1)]); }, [items], ); if (items === null) return 'Loading...'; return ( {items.length === 0 && (

No items yet! Add one above!

)} {items.map(item => ( ))}
); } function AddItemForm({ onNewItem }) { const { Form, InputGroup, Button } = ReactBootstrap; const [newItem, setNewItem] = React.useState(''); const [submitting, setSubmitting] = React.useState(false); const submitNewItem = e => { e.preventDefault(); setSubmitting(true); fetch('/items', { method: 'POST', body: JSON.stringify({ name: newItem }), headers: { 'Content-Type': 'application/json' }, }) .then(r => r.json()) .then(item => { onNewItem(item); setSubmitting(false); setNewItem(''); }); }; return (
setNewItem(e.target.value)} type="text" placeholder="New Item" aria-describedby="basic-addon1" />
); } function ItemDisplay({ item, onItemUpdate, onItemRemoval }) { const { Container, Row, Col, Button } = ReactBootstrap; const toggleCompletion = () => { fetch(`/items/${item.id}`, { method: 'PUT', body: JSON.stringify({ name: item.name, completed: !item.completed, }), headers: { 'Content-Type': 'application/json' }, }) .then(r => r.json()) .then(onItemUpdate); }; const removeItem = () => { fetch(`/items/${item.id}`, { method: 'DELETE' }).then(() => onItemRemoval(item), ); }; return ( {item.name} ); } ReactDOM.render(, document.getElementById('root')); ================================================ FILE: home/containers/docker/taskset_docker_containers/task_022_sample_app/app/src/static/js/react-bootstrap.js ================================================ (function webpackUniversalModuleDefinition(root, factory) { if(typeof exports === 'object' && typeof module === 'object') module.exports = factory(require("react"), require("react-dom")); else if(typeof define === 'function' && define.amd) define(["react", "react-dom"], factory); else if(typeof exports === 'object') exports["ReactBootstrap"] = factory(require("react"), require("react-dom")); else root["ReactBootstrap"] = factory(root["React"], root["ReactDOM"]); })(window, function(__WEBPACK_EXTERNAL_MODULE__1__, __WEBPACK_EXTERNAL_MODULE__6__) { return /******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ /******/ // Check if module is in cache /******/ if(installedModules[moduleId]) { /******/ return installedModules[moduleId].exports; /******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ i: moduleId, /******/ l: false, /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ /******/ // Flag the module as loaded /******/ module.l = true; /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ /******/ // define getter function for harmony exports /******/ __webpack_require__.d = function(exports, name, getter) { /******/ if(!__webpack_require__.o(exports, name)) { /******/ Object.defineProperty(exports, name, { enumerable: true, get: getter }); /******/ } /******/ }; /******/ /******/ // define __esModule on exports /******/ __webpack_require__.r = function(exports) { /******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) { /******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' }); /******/ } /******/ Object.defineProperty(exports, '__esModule', { value: true }); /******/ }; /******/ /******/ // create a fake namespace object /******/ // mode & 1: value is a module id, require it /******/ // mode & 2: merge all properties of value into the ns /******/ // mode & 4: return value when already ns object /******/ // mode & 8|1: behave like require /******/ __webpack_require__.t = function(value, mode) { /******/ if(mode & 1) value = __webpack_require__(value); /******/ if(mode & 8) return value; /******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) 
return value; /******/ var ns = Object.create(null); /******/ __webpack_require__.r(ns); /******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value }); /******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key)); /******/ return ns; /******/ }; /******/ /******/ // getDefaultExport function for compatibility with non-harmony modules /******/ __webpack_require__.n = function(module) { /******/ var getter = module && module.__esModule ? /******/ function getDefault() { return module['default']; } : /******/ function getModuleExports() { return module; }; /******/ __webpack_require__.d(getter, 'a', getter); /******/ return getter; /******/ }; /******/ /******/ // Object.prototype.hasOwnProperty.call /******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; /******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = ""; /******/ /******/ /******/ // Load entry module and return exports /******/ return __webpack_require__(__webpack_require__.s = 86); /******/ }) /************************************************************************/ /******/ ([ /* 0 */ /***/ (function(module, exports, __webpack_require__) { /** * Copyright (c) 2013-present, Facebook, Inc. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ if (false) { var throwOnDirectAccess, ReactIs; } else { // By explicitly using `prop-types` you are opting into new production behavior. // http://fb.me/prop-types-in-prod module.exports = __webpack_require__(58)(); } /***/ }), /* 1 */ /***/ (function(module, exports) { module.exports = __WEBPACK_EXTERNAL_MODULE__1__; /***/ }), /* 2 */ /***/ (function(module, exports, __webpack_require__) { var __WEBPACK_AMD_DEFINE_ARRAY__, __WEBPACK_AMD_DEFINE_RESULT__;/*! Copyright (c) 2017 Jed Watson. 
Licensed under the MIT License (MIT), see http://jedwatson.github.io/classnames */ /* global define */ (function () { 'use strict'; var hasOwn = {}.hasOwnProperty; function classNames() { var classes = []; for (var i = 0; i < arguments.length; i++) { var arg = arguments[i]; if (!arg) continue; var argType = typeof arg; if (argType === 'string' || argType === 'number') { classes.push(arg); } else if (Array.isArray(arg) && arg.length) { var inner = classNames.apply(null, arg); if (inner) { classes.push(inner); } } else if (argType === 'object') { for (var key in arg) { if (hasOwn.call(arg, key) && arg[key]) { classes.push(key); } } } } return classes.join(' '); } if ( true && module.exports) { classNames.default = classNames; module.exports = classNames; } else if (true) { // register as 'classnames', consistent with npm package name !(__WEBPACK_AMD_DEFINE_ARRAY__ = [], __WEBPACK_AMD_DEFINE_RESULT__ = (function () { return classNames; }).apply(exports, __WEBPACK_AMD_DEFINE_ARRAY__), __WEBPACK_AMD_DEFINE_RESULT__ !== undefined && (module.exports = __WEBPACK_AMD_DEFINE_RESULT__)); } else {} })(); /***/ }), /* 3 */ /***/ (function(module, exports) { function _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return self; } module.exports = _assertThisInitialized; /***/ }), /* 4 */ /***/ (function(module, exports) { function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } module.exports = _defineProperty; /***/ }), /* 5 */ /***/ (function(module, exports) { function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } module.exports = _interopRequireDefault; /***/ }), /* 6 */ /***/ (function(module, exports) { module.exports = __WEBPACK_EXTERNAL_MODULE__6__; /***/ }), /* 7 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = uncontrollable; var _react = _interopRequireDefault(__webpack_require__(1)); var _invariant = _interopRequireDefault(__webpack_require__(24)); var Utils = _interopRequireWildcard(__webpack_require__(39)); function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = Object.defineProperty && Object.getOwnPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : {}; if (desc.get || desc.set) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } } newObj.default = obj; return newObj; } } function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } function _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; } function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } function uncontrollable(Component, controlledValues, methods) { if (methods === void 0) { methods = []; } var displayName = Component.displayName || Component.name || 'Component'; var canAcceptRef = Utils.canAcceptRef(Component); var controlledProps = Object.keys(controlledValues); var PROPS_TO_OMIT = controlledProps.map(Utils.defaultKey); !(canAcceptRef || !methods.length) ? false ? undefined : invariant(false) : void 0; var UncontrolledComponent = /*#__PURE__*/ function (_React$Component) { _inheritsLoose(UncontrolledComponent, _React$Component); function UncontrolledComponent() { var _this; for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } _this = _React$Component.call.apply(_React$Component, [this].concat(args)) || this; _this.handlers = Object.create(null); controlledProps.forEach(function (propName) { var handlerName = controlledValues[propName]; var handleChange = function handleChange(value) { if (_this.props[handlerName]) { var _this$props; _this._notifying = true; for (var _len2 = arguments.length, args = new Array(_len2 > 1 ? 
_len2 - 1 : 0), _key2 = 1; _key2 < _len2; _key2++) { args[_key2 - 1] = arguments[_key2]; } (_this$props = _this.props)[handlerName].apply(_this$props, [value].concat(args)); _this._notifying = false; } _this._values[propName] = value; if (!_this.unmounted) _this.forceUpdate(); }; _this.handlers[handlerName] = handleChange; }); if (methods.length) _this.attachRef = function (ref) { _this.inner = ref; }; return _this; } var _proto = UncontrolledComponent.prototype; _proto.shouldComponentUpdate = function shouldComponentUpdate() { //let the forceUpdate trigger the update return !this._notifying; }; _proto.componentWillMount = function componentWillMount() { var _this2 = this; var props = this.props; this._values = Object.create(null); controlledProps.forEach(function (key) { _this2._values[key] = props[Utils.defaultKey(key)]; }); }; _proto.componentWillReceiveProps = function componentWillReceiveProps(nextProps) { var _this3 = this; var props = this.props; controlledProps.forEach(function (key) { /** * If a prop switches from controlled to Uncontrolled * reset its value to the defaultValue */ if (!Utils.isProp(nextProps, key) && Utils.isProp(props, key)) { _this3._values[key] = nextProps[Utils.defaultKey(key)]; } }); }; _proto.componentWillUnmount = function componentWillUnmount() { this.unmounted = true; }; _proto.render = function render() { var _this4 = this; var _this$props2 = this.props, innerRef = _this$props2.innerRef, props = _objectWithoutPropertiesLoose(_this$props2, ["innerRef"]); PROPS_TO_OMIT.forEach(function (prop) { delete props[prop]; }); var newProps = {}; controlledProps.forEach(function (propName) { var propValue = _this4.props[propName]; newProps[propName] = propValue !== undefined ? propValue : _this4._values[propName]; }); return _react.default.createElement(Component, _extends({}, props, newProps, this.handlers, { ref: innerRef || this.attachRef })); }; return UncontrolledComponent; }(_react.default.Component); UncontrolledComponent.displayName = "Uncontrolled(" + displayName + ")"; UncontrolledComponent.propTypes = _extends({ innerRef: function innerRef() {} }, Utils.uncontrolledPropTypes(controlledValues, displayName)); methods.forEach(function (method) { UncontrolledComponent.prototype[method] = function $proxiedMethod() { var _this$inner; return (_this$inner = this.inner)[method].apply(_this$inner, arguments); }; }); var WrappedComponent = UncontrolledComponent; if (_react.default.forwardRef) { WrappedComponent = _react.default.forwardRef(function (props, ref) { return _react.default.createElement(UncontrolledComponent, _extends({}, props, { innerRef: ref })); }); WrappedComponent.propTypes = UncontrolledComponent.propTypes; } WrappedComponent.ControlledComponent = Component; /** * useful when wrapping a Component and you want to control * everything */ WrappedComponent.deferControlTo = function (newComponent, additions, nextMethods) { if (additions === void 0) { additions = {}; } return uncontrollable(newComponent, _extends({}, controlledValues, additions), nextMethods); }; return WrappedComponent; } module.exports = exports["default"]; /***/ }), /* 8 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = useEventCallback; var _react = __webpack_require__(1); var _useCommittedRef = _interopRequireDefault(__webpack_require__(71)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } function useEventCallback(fn) { var ref = (0, _useCommittedRef.default)(fn); return (0, _react.useCallback)(function () { return ref.current.apply(void 0, arguments); }, [ref]); } /***/ }), /* 9 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = qsa; // Zepto.js // (c) 2010-2015 Thomas Fuchs // Zepto.js may be freely distributed under the MIT license. var simpleSelectorRE = /^[\w-]*$/; var toArray = Function.prototype.bind.call(Function.prototype.call, [].slice); function qsa(element, selector) { var maybeID = selector[0] === '#', maybeClass = selector[0] === '.', nameOnly = maybeID || maybeClass ? selector.slice(1) : selector, isSimple = simpleSelectorRE.test(nameOnly), found; if (isSimple) { if (maybeID) { element = element.getElementById ? element : document; return (found = element.getElementById(nameOnly)) ? [found] : []; } if (element.getElementsByClassName && maybeClass) return toArray(element.getElementsByClassName(nameOnly)); return toArray(element.getElementsByTagName(selector)); } return toArray(element.querySelectorAll(selector)); } module.exports = exports["default"]; /***/ }), /* 10 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = void 0; var _default = !!(typeof window !== 'undefined' && window.document && window.document.createElement); exports.default = _default; module.exports = exports["default"]; /***/ }), /* 11 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = style; var _camelizeStyle = _interopRequireDefault(__webpack_require__(40)); var _hyphenateStyle = _interopRequireDefault(__webpack_require__(60)); var _getComputedStyle2 = _interopRequireDefault(__webpack_require__(62)); var _removeStyle = _interopRequireDefault(__webpack_require__(63)); var _properties = __webpack_require__(26); var _isTransform = _interopRequireDefault(__webpack_require__(64)); function style(node, property, value) { var css = ''; var transforms = ''; var props = property; if (typeof property === 'string') { if (value === undefined) { return node.style[(0, _camelizeStyle.default)(property)] || (0, _getComputedStyle2.default)(node).getPropertyValue((0, _hyphenateStyle.default)(property)); } else { (props = {})[property] = value; } } Object.keys(props).forEach(function (key) { var value = props[key]; if (!value && value !== 0) { (0, _removeStyle.default)(node, (0, _hyphenateStyle.default)(key)); } else if ((0, _isTransform.default)(key)) { transforms += key + "(" + value + ") "; } else { css += (0, _hyphenateStyle.default)(key) + ": " + value + ";"; } }); if (transforms) { css += _properties.transform + ": " + transforms + ";"; } node.style.cssText += ';' + css; } module.exports = exports["default"]; /***/ }), /* 12 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = exports.EXITING = exports.ENTERED = exports.ENTERING = exports.EXITED = exports.UNMOUNTED = void 0; var PropTypes = _interopRequireWildcard(__webpack_require__(0)); var _react = _interopRequireDefault(__webpack_require__(1)); var _reactDom = _interopRequireDefault(__webpack_require__(6)); var _reactLifecyclesCompat = __webpack_require__(65); var _PropTypes = __webpack_require__(66); function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = Object.defineProperty && Object.getOwnPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : {}; if (desc.get || desc.set) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } } newObj.default = obj; return newObj; } } function _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; } function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } var UNMOUNTED = 'unmounted'; exports.UNMOUNTED = UNMOUNTED; var EXITED = 'exited'; exports.EXITED = EXITED; var ENTERING = 'entering'; exports.ENTERING = ENTERING; var ENTERED = 'entered'; exports.ENTERED = ENTERED; var EXITING = 'exiting'; /** * The Transition component lets you describe a transition from one component * state to another _over time_ with a simple declarative API. Most commonly * it's used to animate the mounting and unmounting of a component, but can also * be used to describe in-place transition states as well. * * --- * * **Note**: `Transition` is a platform-agnostic base component. If you're using * transitions in CSS, you'll probably want to use * [`CSSTransition`](https://reactcommunity.org/react-transition-group/css-transition) * instead. It inherits all the features of `Transition`, but contains * additional features necessary to play nice with CSS transitions (hence the * name of the component). * * --- * * By default the `Transition` component does not alter the behavior of the * component it renders, it only tracks "enter" and "exit" states for the * components. It's up to you to give meaning and effect to those states. For * example we can add styles to a component when it enters or exits: * * ```jsx * import { Transition } from 'react-transition-group'; * * const duration = 300; * * const defaultStyle = { * transition: `opacity ${duration}ms ease-in-out`, * opacity: 0, * } * * const transitionStyles = { * entering: { opacity: 0 }, * entered: { opacity: 1 }, * }; * * const Fade = ({ in: inProp }) => ( * * {state => ( *
* I'm a fade Transition! *
* )} *
* ); * ``` * * There are 4 main states a Transition can be in: * - `'entering'` * - `'entered'` * - `'exiting'` * - `'exited'` * * Transition state is toggled via the `in` prop. When `true` the component * begins the "Enter" stage. During this stage, the component will shift from * its current transition state, to `'entering'` for the duration of the * transition and then to the `'entered'` stage once it's complete. Let's take * the following example (we'll use the * [useState](https://reactjs.org/docs/hooks-reference.html#usestate) hook): * * ```jsx * function App() { * const [inProp, setInProp] = useState(false); * return ( *
* * {state => ( * // ... * )} * * *
* ); * } * ``` * * When the button is clicked the component will shift to the `'entering'` state * and stay there for 500ms (the value of `timeout`) before it finally switches * to `'entered'`. * * When `in` is `false` the same thing happens except the state moves from * `'exiting'` to `'exited'`. */ exports.EXITING = EXITING; var Transition = /*#__PURE__*/ function (_React$Component) { _inheritsLoose(Transition, _React$Component); function Transition(props, context) { var _this; _this = _React$Component.call(this, props, context) || this; var parentGroup = context.transitionGroup; // In the context of a TransitionGroup all enters are really appears var appear = parentGroup && !parentGroup.isMounting ? props.enter : props.appear; var initialStatus; _this.appearStatus = null; if (props.in) { if (appear) { initialStatus = EXITED; _this.appearStatus = ENTERING; } else { initialStatus = ENTERED; } } else { if (props.unmountOnExit || props.mountOnEnter) { initialStatus = UNMOUNTED; } else { initialStatus = EXITED; } } _this.state = { status: initialStatus }; _this.nextCallback = null; return _this; } var _proto = Transition.prototype; _proto.getChildContext = function getChildContext() { return { transitionGroup: null // allows for nested Transitions }; }; Transition.getDerivedStateFromProps = function getDerivedStateFromProps(_ref, prevState) { var nextIn = _ref.in; if (nextIn && prevState.status === UNMOUNTED) { return { status: EXITED }; } return null; }; // getSnapshotBeforeUpdate(prevProps) { // let nextStatus = null // if (prevProps !== this.props) { // const { status } = this.state // if (this.props.in) { // if (status !== ENTERING && status !== ENTERED) { // nextStatus = ENTERING // } // } else { // if (status === ENTERING || status === ENTERED) { // nextStatus = EXITING // } // } // } // return { nextStatus } // } _proto.componentDidMount = function componentDidMount() { this.updateStatus(true, this.appearStatus); }; _proto.componentDidUpdate = function componentDidUpdate(prevProps) { var nextStatus = null; if (prevProps !== this.props) { var status = this.state.status; if (this.props.in) { if (status !== ENTERING && status !== ENTERED) { nextStatus = ENTERING; } } else { if (status === ENTERING || status === ENTERED) { nextStatus = EXITING; } } } this.updateStatus(false, nextStatus); }; _proto.componentWillUnmount = function componentWillUnmount() { this.cancelNextCallback(); }; _proto.getTimeouts = function getTimeouts() { var timeout = this.props.timeout; var exit, enter, appear; exit = enter = appear = timeout; if (timeout != null && typeof timeout !== 'number') { exit = timeout.exit; enter = timeout.enter; // TODO: remove fallback for next major appear = timeout.appear !== undefined ? timeout.appear : enter; } return { exit: exit, enter: enter, appear: appear }; }; _proto.updateStatus = function updateStatus(mounting, nextStatus) { if (mounting === void 0) { mounting = false; } if (nextStatus !== null) { // nextStatus will always be ENTERING or EXITING. this.cancelNextCallback(); var node = _reactDom.default.findDOMNode(this); if (nextStatus === ENTERING) { this.performEnter(node, mounting); } else { this.performExit(node); } } else if (this.props.unmountOnExit && this.state.status === EXITED) { this.setState({ status: UNMOUNTED }); } }; _proto.performEnter = function performEnter(node, mounting) { var _this2 = this; var enter = this.props.enter; var appearing = this.context.transitionGroup ? 
this.context.transitionGroup.isMounting : mounting; var timeouts = this.getTimeouts(); var enterTimeout = appearing ? timeouts.appear : timeouts.enter; // no enter animation skip right to ENTERED // if we are mounting and running this it means appear _must_ be set if (!mounting && !enter) { this.safeSetState({ status: ENTERED }, function () { _this2.props.onEntered(node); }); return; } this.props.onEnter(node, appearing); this.safeSetState({ status: ENTERING }, function () { _this2.props.onEntering(node, appearing); _this2.onTransitionEnd(node, enterTimeout, function () { _this2.safeSetState({ status: ENTERED }, function () { _this2.props.onEntered(node, appearing); }); }); }); }; _proto.performExit = function performExit(node) { var _this3 = this; var exit = this.props.exit; var timeouts = this.getTimeouts(); // no exit animation skip right to EXITED if (!exit) { this.safeSetState({ status: EXITED }, function () { _this3.props.onExited(node); }); return; } this.props.onExit(node); this.safeSetState({ status: EXITING }, function () { _this3.props.onExiting(node); _this3.onTransitionEnd(node, timeouts.exit, function () { _this3.safeSetState({ status: EXITED }, function () { _this3.props.onExited(node); }); }); }); }; _proto.cancelNextCallback = function cancelNextCallback() { if (this.nextCallback !== null) { this.nextCallback.cancel(); this.nextCallback = null; } }; _proto.safeSetState = function safeSetState(nextState, callback) { // This shouldn't be necessary, but there are weird race conditions with // setState callbacks and unmounting in testing, so always make sure that // we can cancel any pending setState callbacks after we unmount. callback = this.setNextCallback(callback); this.setState(nextState, callback); }; _proto.setNextCallback = function setNextCallback(callback) { var _this4 = this; var active = true; this.nextCallback = function (event) { if (active) { active = false; _this4.nextCallback = null; callback(event); } }; this.nextCallback.cancel = function () { active = false; }; return this.nextCallback; }; _proto.onTransitionEnd = function onTransitionEnd(node, timeout, handler) { this.setNextCallback(handler); var doesNotHaveTimeoutOrListener = timeout == null && !this.props.addEndListener; if (!node || doesNotHaveTimeoutOrListener) { setTimeout(this.nextCallback, 0); return; } if (this.props.addEndListener) { this.props.addEndListener(node, this.nextCallback); } if (timeout != null) { setTimeout(this.nextCallback, timeout); } }; _proto.render = function render() { var status = this.state.status; if (status === UNMOUNTED) { return null; } var _this$props = this.props, children = _this$props.children, childProps = _objectWithoutPropertiesLoose(_this$props, ["children"]); // filter props for Transtition delete childProps.in; delete childProps.mountOnEnter; delete childProps.unmountOnExit; delete childProps.appear; delete childProps.enter; delete childProps.exit; delete childProps.timeout; delete childProps.addEndListener; delete childProps.onEnter; delete childProps.onEntering; delete childProps.onEntered; delete childProps.onExit; delete childProps.onExiting; delete childProps.onExited; if (typeof children === 'function') { return children(status, childProps); } var child = _react.default.Children.only(children); return _react.default.cloneElement(child, childProps); }; return Transition; }(_react.default.Component); Transition.contextTypes = { transitionGroup: PropTypes.object }; Transition.childContextTypes = { transitionGroup: function transitionGroup() {} }; 
Transition.propTypes = false ? undefined : {}; function noop() {} Transition.defaultProps = { in: false, mountOnEnter: false, unmountOnExit: false, appear: false, enter: true, exit: true, onEnter: noop, onEntering: noop, onEntered: noop, onExit: noop, onExiting: noop, onExited: noop }; Transition.UNMOUNTED = 0; Transition.EXITED = 1; Transition.ENTERING = 2; Transition.ENTERED = 3; Transition.EXITING = 4; var _default = (0, _reactLifecyclesCompat.polyfill)(Transition); exports.default = _default; /***/ }), /* 13 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.default = isRequiredForA11y; function isRequiredForA11y(validator) { return function validate(props, propName, componentName, location, propFullName) { var componentNameSafe = componentName || '<>'; var propFullNameSafe = propFullName || propName; if (props[propName] == null) { return new Error('The ' + location + ' `' + propFullNameSafe + '` is required to make ' + ('`' + componentNameSafe + '` accessible for users of assistive ') + 'technologies such as screen readers.'); } for (var _len = arguments.length, args = Array(_len > 5 ? _len - 5 : 0), _key = 5; _key < _len; _key++) { args[_key - 5] = arguments[_key]; } return validator.apply(undefined, [props, propName, componentName, location, propFullName].concat(args)); }; } module.exports = exports['default']; /***/ }), /* 14 */ /***/ (function(module, exports) { function _extends() { module.exports = _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } module.exports = _extends; /***/ }), /* 15 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = useUncontrolled; var _react = __webpack_require__(1); var Utils = _interopRequireWildcard(__webpack_require__(39)); function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = Object.defineProperty && Object.getOwnPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : {}; if (desc.get || desc.set) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } } newObj.default = obj; return newObj; } } function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } function _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; } function _toPropertyKey(arg) { var key = _toPrimitive(arg, "string"); return typeof key === "symbol" ? 
key : String(key); } function _toPrimitive(input, hint) { if (typeof input !== "object" || input === null) return input; var prim = input[Symbol.toPrimitive]; if (prim !== undefined) { var res = prim.call(input, hint || "default"); if (typeof res !== "object") return res; throw new TypeError("@@toPrimitive must return a primitive value."); } return (hint === "string" ? String : Number)(input); } function useUncontrolled(props, config) { return Object.keys(config).reduce(function (result, fieldName) { var _extends2; var defaultValue = result[Utils.defaultKey(fieldName)], propsValue = result[fieldName], rest = _objectWithoutPropertiesLoose(result, [Utils.defaultKey(fieldName), fieldName].map(_toPropertyKey)); var handlerName = config[fieldName]; var prevProps = (0, _react.useRef)({}); var _useState = (0, _react.useState)(defaultValue), stateValue = _useState[0], setState = _useState[1]; var isProp = Utils.isProp(props, fieldName); var wasProp = Utils.isProp(prevProps.current, fieldName); prevProps.current = props; /** * If a prop switches from controlled to Uncontrolled * reset its value to the defaultValue */ if (!isProp && wasProp) { setState(defaultValue); } var propsHandler = props[handlerName]; var handler = (0, _react.useCallback)(function (value) { for (var _len = arguments.length, args = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) { args[_key - 1] = arguments[_key]; } if (propsHandler) propsHandler.apply(void 0, [value].concat(args)); setState(value); }, [setState, propsHandler]); return _extends({}, rest, (_extends2 = {}, _extends2[fieldName] = isProp ? propsValue : stateValue, _extends2[handlerName] = handler, _extends2)); }, props); } module.exports = exports["default"]; /***/ }), /* 16 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = ownerDocument; function ownerDocument(node) { return node && node.ownerDocument || document; } module.exports = exports["default"]; /***/ }), /* 17 */ /***/ (function(module, exports) { function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } module.exports = _inheritsLoose; /***/ }), /* 18 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = scrollbarSize; var _inDOM = _interopRequireDefault(__webpack_require__(10)); var size; function scrollbarSize(recalc) { if (!size && size !== 0 || recalc) { if (_inDOM.default) { var scrollDiv = document.createElement('div'); scrollDiv.style.position = 'absolute'; scrollDiv.style.top = '-9999px'; scrollDiv.style.width = '50px'; scrollDiv.style.height = '50px'; scrollDiv.style.overflow = 'scroll'; document.body.appendChild(scrollDiv); size = scrollDiv.offsetWidth - scrollDiv.clientWidth; document.body.removeChild(scrollDiv); } } return size; } module.exports = exports["default"]; /***/ }), /* 19 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.isRequiredForA11y = exports.elementType = exports.deprecated = exports.componentOrElement = exports.all = undefined; var _all = __webpack_require__(34); var _all2 = _interopRequireDefault(_all); var _componentOrElement = __webpack_require__(20); var _componentOrElement2 = _interopRequireDefault(_componentOrElement); var _deprecated = 
__webpack_require__(67); var _deprecated2 = _interopRequireDefault(_deprecated); var _elementType = __webpack_require__(28); var _elementType2 = _interopRequireDefault(_elementType); var _isRequiredForA11y = __webpack_require__(13); var _isRequiredForA11y2 = _interopRequireDefault(_isRequiredForA11y); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } exports.all = _all2.default; exports.componentOrElement = _componentOrElement2.default; exports.deprecated = _deprecated2.default; exports.elementType = _elementType2.default; exports.isRequiredForA11y = _isRequiredForA11y2.default; /***/ }), /* 20 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; var _react = __webpack_require__(1); var _react2 = _interopRequireDefault(_react); var _createChainableTypeChecker = __webpack_require__(27); var _createChainableTypeChecker2 = _interopRequireDefault(_createChainableTypeChecker); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function validate(props, propName, componentName, location, propFullName) { var propValue = props[propName]; var propType = typeof propValue === 'undefined' ? 'undefined' : _typeof(propValue); if (_react2.default.isValidElement(propValue)) { return new Error('Invalid ' + location + ' `' + propFullName + '` of type ReactElement ' + ('supplied to `' + componentName + '`, expected a ReactComponent or a ') + 'DOMElement. You can usually obtain a ReactComponent or DOMElement ' + 'from a ReactElement by attaching a ref to it.'); } if ((propType !== 'object' || typeof propValue.render !== 'function') && propValue.nodeType !== 1) { return new Error('Invalid ' + location + ' `' + propFullName + '` of value `' + propValue + '` ' + ('supplied to `' + componentName + '`, expected a ReactComponent or a ') + 'DOMElement.'); } return null; } exports.default = (0, _createChainableTypeChecker2.default)(validate); module.exports = exports['default']; /***/ }), /* 21 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; /** * Copyright (c) 2014-present, Facebook, Inc. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ /** * Similar to invariant but only logs a warning if the condition is not met. * This can be used to log issues in development environments in critical * paths. Removing the logging code for production environments will keep the * same logic and follow the same code paths. */ var __DEV__ = "production" !== 'production'; var warning = function () {}; if (__DEV__) { var printWarning = function printWarning(format, args) { var len = arguments.length; args = new Array(len > 1 ? len - 1 : 0); for (var key = 1; key < len; key++) { args[key - 1] = arguments[key]; } var argIndex = 0; var message = 'Warning: ' + format.replace(/%s/g, function () { return args[argIndex++]; }); if (typeof console !== 'undefined') { console.error(message); } try { // --- Welcome to debugging React --- // This error was thrown as a convenience so that you can use this stack // to find the callsite that caused this warning to fire. 
throw new Error(message); } catch (x) {} }; warning = function (condition, format, args) { var len = arguments.length; args = new Array(len > 2 ? len - 2 : 0); for (var key = 2; key < len; key++) { args[key - 2] = arguments[key]; } if (format === undefined) { throw new Error('`warning(condition, format, ...args)` requires a warning ' + 'message argument'); } if (!condition) { printWarning.apply(null, [format].concat(args)); } }; } module.exports = warning; /***/ }), /* 22 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = void 0; var _inDOM = _interopRequireDefault(__webpack_require__(10)); var _default = function () { // HTML DOM and SVG DOM may have different support levels, // so we need to check on context instead of a document root element. return _inDOM.default ? function (context, node) { if (context.contains) { return context.contains(node); } else if (context.compareDocumentPosition) { return context === node || !!(context.compareDocumentPosition(node) & 16); } else { return fallback(context, node); } } : fallback; }(); exports.default = _default; function fallback(context, node) { if (node) do { if (node === context) return true; } while (node = node.parentNode); return false; } module.exports = exports["default"]; /***/ }), /* 23 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = void 0; var _properties = _interopRequireDefault(__webpack_require__(26)); var _style = _interopRequireDefault(__webpack_require__(11)); function onEnd(node, handler, duration) { var fakeEvent = { target: node, currentTarget: node }, backup; if (!_properties.default.end) duration = 0;else if (duration == null) duration = parseDuration(node) || 0; if (_properties.default.end) { node.addEventListener(_properties.default.end, done, false); backup = setTimeout(function () { return done(fakeEvent); }, (duration || 100) * 1.5); } else setTimeout(done.bind(null, fakeEvent), 0); function done(event) { if (event.target !== event.currentTarget) return; clearTimeout(backup); event.target.removeEventListener(_properties.default.end, done); handler.call(this); } } onEnd._parseDuration = parseDuration; var _default = onEnd; exports.default = _default; function parseDuration(node) { var str = (0, _style.default)(node, _properties.default.duration), mult = str.indexOf('ms') === -1 ? 1000 : 1; return parseFloat(str) * mult; } module.exports = exports["default"]; /***/ }), /* 24 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; /** * Copyright (c) 2013-present, Facebook, Inc. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ /** * Use invariant() to assert state which your program assumes to be true. * * Provide sprintf-style format (only %s is supported) and arguments * to provide information about what broke and what you were * expecting. * * The invariant message will be stripped in production, but the invariant * will remain to ensure logic does not differ in production. 
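 *
 * Illustrative sketch added by the editor (not in the upstream source; the
 * `config` argument below is hypothetical):
 *
 *   invariant(config != null, 'Expected a config object, got %s', typeof config);
 *
 * When `config` is undefined, the call above throws an error named
 * 'Invariant Violation' with the message 'Expected a config object, got
 * undefined'; if no format string is supplied at all, the generic
 * "Minified exception occurred" fallback below is thrown instead.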
*/ var invariant = function (condition, format, a, b, c, d, e, f) { if (false) {} if (!condition) { var error; if (format === undefined) { error = new Error('Minified exception occurred; use the non-minified dev environment ' + 'for the full error message and additional helpful warnings.'); } else { var args = [a, b, c, d, e, f]; var argIndex = 0; error = new Error(format.replace(/%s/g, function () { return args[argIndex++]; })); error.name = 'Invariant Violation'; } error.framesToPop = 1; // we don't care about invariant's own frame throw error; } }; module.exports = invariant; /***/ }), /* 25 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = void 0; var _on = _interopRequireDefault(__webpack_require__(43)); exports.on = _on.default; var _off = _interopRequireDefault(__webpack_require__(44)); exports.off = _off.default; var _filter = _interopRequireDefault(__webpack_require__(78)); exports.filter = _filter.default; var _listen = _interopRequireDefault(__webpack_require__(30)); exports.listen = _listen.default; var _default = { on: _on.default, off: _off.default, filter: _filter.default, listen: _listen.default }; exports.default = _default; /***/ }), /* 26 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = exports.animationEnd = exports.animationDelay = exports.animationTiming = exports.animationDuration = exports.animationName = exports.transitionEnd = exports.transitionDuration = exports.transitionDelay = exports.transitionTiming = exports.transitionProperty = exports.transform = void 0; var _inDOM = _interopRequireDefault(__webpack_require__(10)); var transform = 'transform'; exports.transform = transform; var prefix, transitionEnd, animationEnd; exports.animationEnd = animationEnd; exports.transitionEnd = transitionEnd; var transitionProperty, transitionDuration, transitionTiming, transitionDelay; exports.transitionDelay = transitionDelay; exports.transitionTiming = transitionTiming; exports.transitionDuration = transitionDuration; exports.transitionProperty = transitionProperty; var animationName, animationDuration, animationTiming, animationDelay; exports.animationDelay = animationDelay; exports.animationTiming = animationTiming; exports.animationDuration = animationDuration; exports.animationName = animationName; if (_inDOM.default) { var _getTransitionPropert = getTransitionProperties(); prefix = _getTransitionPropert.prefix; exports.transitionEnd = transitionEnd = _getTransitionPropert.transitionEnd; exports.animationEnd = animationEnd = _getTransitionPropert.animationEnd; exports.transform = transform = prefix + "-" + transform; exports.transitionProperty = transitionProperty = prefix + "-transition-property"; exports.transitionDuration = transitionDuration = prefix + "-transition-duration"; exports.transitionDelay = transitionDelay = prefix + "-transition-delay"; exports.transitionTiming = transitionTiming = prefix + "-transition-timing-function"; exports.animationName = animationName = prefix + "-animation-name"; exports.animationDuration = animationDuration = prefix + "-animation-duration"; exports.animationTiming = animationTiming = prefix + "-animation-delay"; exports.animationDelay = animationDelay = prefix + "-animation-timing-function"; } var _default = { transform: transform, end: transitionEnd, property: transitionProperty, timing: 
transitionTiming, delay: transitionDelay, duration: transitionDuration }; exports.default = _default; function getTransitionProperties() { var style = document.createElement('div').style; var vendorMap = { O: function O(e) { return "o" + e.toLowerCase(); }, Moz: function Moz(e) { return e.toLowerCase(); }, Webkit: function Webkit(e) { return "webkit" + e; }, ms: function ms(e) { return "MS" + e; } }; var vendors = Object.keys(vendorMap); var transitionEnd, animationEnd; var prefix = ''; for (var i = 0; i < vendors.length; i++) { var vendor = vendors[i]; if (vendor + "TransitionProperty" in style) { prefix = "-" + vendor.toLowerCase(); transitionEnd = vendorMap[vendor]('TransitionEnd'); animationEnd = vendorMap[vendor]('AnimationEnd'); break; } } if (!transitionEnd && 'transitionProperty' in style) transitionEnd = 'transitionend'; if (!animationEnd && 'animationName' in style) animationEnd = 'animationend'; style = null; return { animationEnd: animationEnd, transitionEnd: transitionEnd, prefix: prefix }; } /***/ }), /* 27 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.default = createChainableTypeChecker; /** * Copyright 2013-present, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. */ // Mostly taken from ReactPropTypes. function createChainableTypeChecker(validate) { function checkType(isRequired, props, propName, componentName, location, propFullName) { var componentNameSafe = componentName || '<>'; var propFullNameSafe = propFullName || propName; if (props[propName] == null) { if (isRequired) { return new Error('Required ' + location + ' `' + propFullNameSafe + '` was not specified ' + ('in `' + componentNameSafe + '`.')); } return null; } for (var _len = arguments.length, args = Array(_len > 6 ? _len - 6 : 0), _key = 6; _key < _len; _key++) { args[_key - 6] = arguments[_key]; } return validate.apply(undefined, [props, propName, componentNameSafe, location, propFullNameSafe].concat(args)); } var chainedCheckType = checkType.bind(null, false); chainedCheckType.isRequired = checkType.bind(null, true); return chainedCheckType; } module.exports = exports['default']; /***/ }), /* 28 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var _react = __webpack_require__(1); var _react2 = _interopRequireDefault(_react); var _reactIs = __webpack_require__(69); var _createChainableTypeChecker = __webpack_require__(27); var _createChainableTypeChecker2 = _interopRequireDefault(_createChainableTypeChecker); function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } function elementType(props, propName, componentName, location, propFullName) { var propValue = props[propName]; if (_react2.default.isValidElement(propValue)) { return new Error('Invalid ' + location + ' `' + propFullName + '` of type ReactElement ' + ('supplied to `' + componentName + '`,expected an element type (a string ') + ', component class, or function component).'); } if (!(0, _reactIs.isValidElementType)(propValue)) { return new Error('Invalid ' + location + ' `' + propFullName + '` of value `' + propValue + '` ' + ('supplied to `' + componentName + '`, expected an element type (a string ') + ', component class, or function component).'); } return null; } exports.default = (0, _createChainableTypeChecker2.default)(elementType); module.exports = exports['default']; /***/ }), /* 29 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = void 0; var _react = _interopRequireDefault(__webpack_require__(1)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } var DropdownContext = _react.default.createContext({ menuRef: function menuRef() {}, toggleRef: function toggleRef() {}, onToggle: function onToggle() {}, toggleNode: undefined, alignEnd: null, show: null, drop: null }); var _default = DropdownContext; exports.default = _default; module.exports = exports.default; /***/ }), /* 30 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = void 0; var _inDOM = _interopRequireDefault(__webpack_require__(10)); var _on = _interopRequireDefault(__webpack_require__(43)); var _off = _interopRequireDefault(__webpack_require__(44)); var listen = function listen() {}; if (_inDOM.default) { listen = function listen(node, eventName, handler, capture) { (0, _on.default)(node, eventName, handler, capture); return function () { (0, _off.default)(node, eventName, handler, capture); }; }; } var _default = listen; exports.default = _default; module.exports = exports["default"]; /***/ }), /* 31 */ /***/ (function(module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_require__.r(__webpack_exports__); // EXTERNAL MODULE: ./node_modules/@babel/runtime/helpers/objectWithoutPropertiesLoose.js var objectWithoutPropertiesLoose = __webpack_require__(53); var objectWithoutPropertiesLoose_default = /*#__PURE__*/__webpack_require__.n(objectWithoutPropertiesLoose); // EXTERNAL MODULE: ./node_modules/@babel/runtime/helpers/extends.js var helpers_extends = __webpack_require__(14); var extends_default = /*#__PURE__*/__webpack_require__.n(helpers_extends); // EXTERNAL MODULE: ./node_modules/@babel/runtime/helpers/inheritsLoose.js var inheritsLoose = __webpack_require__(17); var inheritsLoose_default = /*#__PURE__*/__webpack_require__.n(inheritsLoose); // EXTERNAL MODULE: ./node_modules/@babel/runtime/helpers/assertThisInitialized.js var assertThisInitialized = __webpack_require__(3); var assertThisInitialized_default = /*#__PURE__*/__webpack_require__.n(assertThisInitialized); // EXTERNAL MODULE: ./node_modules/@babel/runtime/helpers/defineProperty.js var defineProperty = __webpack_require__(4); var defineProperty_default = /*#__PURE__*/__webpack_require__.n(defineProperty); // EXTERNAL MODULE: external {"root":"React","commonjs2":"react","commonjs":"react","amd":"react"} var external_root_React_commonjs2_react_commonjs_react_amd_react_ = 
__webpack_require__(1); // EXTERNAL MODULE: ./node_modules/popper.js/dist/esm/popper.js var popper = __webpack_require__(38); // EXTERNAL MODULE: ./node_modules/react-popper/node_modules/create-react-context/lib/index.js var lib = __webpack_require__(54); var lib_default = /*#__PURE__*/__webpack_require__.n(lib); // CONCATENATED MODULE: ./node_modules/react-popper/lib/esm/Manager.js var ManagerContext = lib_default()({ setReferenceNode: undefined, referenceNode: undefined }); var Manager_Manager = /*#__PURE__*/ function (_React$Component) { inheritsLoose_default()(Manager, _React$Component); function Manager() { var _this; _this = _React$Component.call(this) || this; defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "setReferenceNode", function (referenceNode) { if (!referenceNode || _this.state.context.referenceNode === referenceNode) { return; } _this.setState(function (_ref) { var context = _ref.context; return { context: extends_default()({}, context, { referenceNode: referenceNode }) }; }); }); _this.state = { context: { setReferenceNode: _this.setReferenceNode, referenceNode: undefined } }; return _this; } var _proto = Manager.prototype; _proto.render = function render() { return external_root_React_commonjs2_react_commonjs_react_amd_react_["createElement"](ManagerContext.Provider, { value: this.state.context }, this.props.children); }; return Manager; }(external_root_React_commonjs2_react_commonjs_react_amd_react_["Component"]); // CONCATENATED MODULE: ./node_modules/react-popper/lib/esm/utils.js /** * Takes an argument and if it's an array, returns the first item in the array, * otherwise returns the argument. Used for Preact compatibility. */ var unwrapArray = function unwrapArray(arg) { return Array.isArray(arg) ? arg[0] : arg; }; /** * Takes a maybe-undefined function and arbitrary args and invokes the function * only if it is defined. */ var safeInvoke = function safeInvoke(fn) { if (typeof fn === "function") { for (var _len = arguments.length, args = new Array(_len > 1 ? 
_len - 1 : 0), _key = 1; _key < _len; _key++) { args[_key - 1] = arguments[_key]; } return fn.apply(void 0, args); } }; // CONCATENATED MODULE: ./node_modules/react-popper/lib/esm/Popper.js var initialStyle = { position: 'absolute', top: 0, left: 0, opacity: 0, pointerEvents: 'none' }; var initialArrowStyle = {}; var Popper_InnerPopper = /*#__PURE__*/ function (_React$Component) { inheritsLoose_default()(InnerPopper, _React$Component); function InnerPopper() { var _this; for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } _this = _React$Component.call.apply(_React$Component, [this].concat(args)) || this; defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "state", { data: undefined, placement: undefined }); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "popperInstance", void 0); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "popperNode", null); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "arrowNode", null); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "setPopperNode", function (popperNode) { if (!popperNode || _this.popperNode === popperNode) return; safeInvoke(_this.props.innerRef, popperNode); _this.popperNode = popperNode; _this.updatePopperInstance(); }); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "setArrowNode", function (arrowNode) { _this.arrowNode = arrowNode; }); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "updateStateModifier", { enabled: true, order: 900, fn: function fn(data) { var placement = data.placement; _this.setState({ data: data, placement: placement }); return data; } }); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "getOptions", function () { return { placement: _this.props.placement, eventsEnabled: _this.props.eventsEnabled, positionFixed: _this.props.positionFixed, modifiers: extends_default()({}, _this.props.modifiers, { arrow: extends_default()({}, _this.props.modifiers && _this.props.modifiers.arrow, { enabled: !!_this.arrowNode, element: _this.arrowNode }), applyStyle: { enabled: false }, updateStateModifier: _this.updateStateModifier }) }; }); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "getPopperStyle", function () { return !_this.popperNode || !_this.state.data ? initialStyle : extends_default()({ position: _this.state.data.offsets.popper.position }, _this.state.data.styles); }); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "getPopperPlacement", function () { return !_this.state.data ? undefined : _this.state.placement; }); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "getArrowStyle", function () { return !_this.arrowNode || !_this.state.data ? initialArrowStyle : _this.state.data.arrowStyles; }); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "getOutOfBoundariesState", function () { return _this.state.data ? 
_this.state.data.hide : undefined; }); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "destroyPopperInstance", function () { if (!_this.popperInstance) return; _this.popperInstance.destroy(); _this.popperInstance = null; }); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "updatePopperInstance", function () { _this.destroyPopperInstance(); var _assertThisInitialize = assertThisInitialized_default()(assertThisInitialized_default()(_this)), popperNode = _assertThisInitialize.popperNode; var referenceElement = _this.props.referenceElement; if (!referenceElement || !popperNode) return; _this.popperInstance = new popper["a" /* default */](referenceElement, popperNode, _this.getOptions()); }); defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "scheduleUpdate", function () { if (_this.popperInstance) { _this.popperInstance.scheduleUpdate(); } }); return _this; } var _proto = InnerPopper.prototype; _proto.componentDidUpdate = function componentDidUpdate(prevProps, prevState) { // If the Popper.js options have changed, update the instance (destroy + create) if (this.props.placement !== prevProps.placement || this.props.referenceElement !== prevProps.referenceElement || this.props.positionFixed !== prevProps.positionFixed) { this.updatePopperInstance(); } else if (this.props.eventsEnabled !== prevProps.eventsEnabled && this.popperInstance) { this.props.eventsEnabled ? this.popperInstance.enableEventListeners() : this.popperInstance.disableEventListeners(); } // A placement difference in state means popper determined a new placement // apart from the props value. By the time the popper element is rendered with // the new position Popper has already measured it, if the place change triggers // a size change it will result in a misaligned popper. So we schedule an update to be sure. if (prevState.placement !== this.state.placement) { this.scheduleUpdate(); } }; _proto.componentWillUnmount = function componentWillUnmount() { safeInvoke(this.props.innerRef, null); this.destroyPopperInstance(); }; _proto.render = function render() { return unwrapArray(this.props.children)({ ref: this.setPopperNode, style: this.getPopperStyle(), placement: this.getPopperPlacement(), outOfBoundaries: this.getOutOfBoundariesState(), scheduleUpdate: this.scheduleUpdate, arrowProps: { ref: this.setArrowNode, style: this.getArrowStyle() } }); }; return InnerPopper; }(external_root_React_commonjs2_react_commonjs_react_amd_react_["Component"]); defineProperty_default()(Popper_InnerPopper, "defaultProps", { placement: 'bottom', eventsEnabled: true, referenceElement: undefined, positionFixed: false }); var placements = popper["a" /* default */].placements; function Popper(_ref) { var referenceElement = _ref.referenceElement, props = objectWithoutPropertiesLoose_default()(_ref, ["referenceElement"]); return external_root_React_commonjs2_react_commonjs_react_amd_react_["createElement"](ManagerContext.Consumer, null, function (_ref2) { var referenceNode = _ref2.referenceNode; return external_root_React_commonjs2_react_commonjs_react_amd_react_["createElement"](Popper_InnerPopper, extends_default()({ referenceElement: referenceElement !== undefined ? 
referenceElement : referenceNode }, props)); }); } // EXTERNAL MODULE: ./node_modules/warning/warning.js var warning = __webpack_require__(21); var warning_default = /*#__PURE__*/__webpack_require__.n(warning); // CONCATENATED MODULE: ./node_modules/react-popper/lib/esm/Reference.js var Reference_InnerReference = /*#__PURE__*/ function (_React$Component) { inheritsLoose_default()(InnerReference, _React$Component); function InnerReference() { var _this; for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } _this = _React$Component.call.apply(_React$Component, [this].concat(args)) || this; defineProperty_default()(assertThisInitialized_default()(assertThisInitialized_default()(_this)), "refHandler", function (node) { safeInvoke(_this.props.innerRef, node); safeInvoke(_this.props.setReferenceNode, node); }); return _this; } var _proto = InnerReference.prototype; _proto.render = function render() { warning_default()(Boolean(this.props.setReferenceNode), '`Reference` should not be used outside of a `Manager` component.'); return unwrapArray(this.props.children)({ ref: this.refHandler }); }; return InnerReference; }(external_root_React_commonjs2_react_commonjs_react_amd_react_["Component"]); function Reference(props) { return external_root_React_commonjs2_react_commonjs_react_amd_react_["createElement"](ManagerContext.Consumer, null, function (_ref) { var setReferenceNode = _ref.setReferenceNode; return external_root_React_commonjs2_react_commonjs_react_amd_react_["createElement"](Reference_InnerReference, extends_default()({ setReferenceNode: setReferenceNode }, props)); }); } // CONCATENATED MODULE: ./node_modules/react-popper/lib/esm/index.js /* concated harmony reexport Popper */__webpack_require__.d(__webpack_exports__, "Popper", function() { return Popper; }); /* concated harmony reexport placements */__webpack_require__.d(__webpack_exports__, "placements", function() { return placements; }); /* concated harmony reexport Manager */__webpack_require__.d(__webpack_exports__, "Manager", function() { return Manager_Manager; }); /* concated harmony reexport Reference */__webpack_require__.d(__webpack_exports__, "Reference", function() { return Reference; }); // Public components // Public types /***/ }), /* 32 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = forwardRef; var _react = _interopRequireDefault(__webpack_require__(1)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function forwardRef(renderFn, _temp) { var _ref = _temp === void 0 ? {} : _temp, propTypes = _ref.propTypes, defaultProps = _ref.defaultProps, _ref$allowFallback = _ref.allowFallback, allowFallback = _ref$allowFallback === void 0 ? false : _ref$allowFallback, _ref$displayName = _ref.displayName, displayName = _ref$displayName === void 0 ? renderFn.name || renderFn.displayName : _ref$displayName; var render = function render(props, ref) { return renderFn(props, ref); }; return Object.assign(_react.default.forwardRef || !allowFallback ? 
_react.default.forwardRef(render) : function (props) { return render(props, null); }, { displayName: displayName, propTypes: propTypes, defaultProps: defaultProps }); } /***/ }), /* 33 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = camelize; var rHyphen = /-(.)/g; function camelize(string) { return string.replace(rHyphen, function (_, chr) { return chr.toUpperCase(); }); } module.exports = exports["default"]; /***/ }), /* 34 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.default = all; var _createChainableTypeChecker = __webpack_require__(27); var _createChainableTypeChecker2 = _interopRequireDefault(_createChainableTypeChecker); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function all() { for (var _len = arguments.length, validators = Array(_len), _key = 0; _key < _len; _key++) { validators[_key] = arguments[_key]; } function allPropTypes() { for (var _len2 = arguments.length, args = Array(_len2), _key2 = 0; _key2 < _len2; _key2++) { args[_key2] = arguments[_key2]; } var error = null; validators.forEach(function (validator) { if (error != null) { return; } var result = validator.apply(undefined, args); if (result != null) { error = result; } }); return error; } return (0, _createChainableTypeChecker2.default)(allPropTypes); } module.exports = exports['default']; /***/ }), /* 35 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = void 0; var _propTypes = _interopRequireDefault(__webpack_require__(0)); var _react = _interopRequireDefault(__webpack_require__(1)); var _reactPopper = __webpack_require__(31); var _DropdownContext = _interopRequireDefault(__webpack_require__(29)); var _RootCloseWrapper = _interopRequireDefault(__webpack_require__(42)); var _mapContextToProps = _interopRequireDefault(__webpack_require__(77)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } function _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; } function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } var DropdownMenu = /*#__PURE__*/ function (_React$Component) { _inheritsLoose(DropdownMenu, _React$Component); function DropdownMenu() { var _this; for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } _this = _React$Component.call.apply(_React$Component, [this].concat(args)) || this; _this.state = { toggleId: null }; _this.popperIsInitialized = false; _this.handleClose = function (e) { if (!_this.props.onToggle) return; _this.props.onToggle(false, e); }; return _this; } var _proto = DropdownMenu.prototype; _proto.getSnapshotBeforeUpdate = function getSnapshotBeforeUpdate(prevProps) { // If, to the best we can tell, this update won't reinitialize popper, // manually schedule an update var shouldUpdatePopper = !prevProps.show && this.props.show && this.popperIsInitialized && // a new reference node will already trigger this internally prevProps.toggleNode === this.props.toggleNode; if (this.props.show && this.props.usePopper && !this.popperIsInitialized) { this.popperIsInitialized = true; } return !!shouldUpdatePopper; }; _proto.componentDidUpdate = function componentDidUpdate(_, __, shouldUpdatePopper) { if (shouldUpdatePopper && this.scheduleUpdate) { this.scheduleUpdate(); } }; _proto.render = function render() { var _this2 = this; var _this$props = this.props, show = _this$props.show, flip = _this$props.flip, menuRef = _this$props.menuRef, alignEnd = _this$props.alignEnd, drop = _this$props.drop, usePopper = _this$props.usePopper, toggleNode = _this$props.toggleNode, rootCloseEvent = _this$props.rootCloseEvent, _this$props$popperCon = _this$props.popperConfig, popperConfig = _this$props$popperCon === void 0 ? {} : _this$props$popperCon; var placement = alignEnd ? 'bottom-end' : 'bottom-start'; if (drop === 'up') placement = alignEnd ? 'top-end' : 'top-start'; if (drop === 'right') placement = alignEnd ? 'right-end' : 'right-start'; if (drop === 'left') placement = alignEnd ? 
'left-end' : 'left-start'; var menu = null; var menuProps = { ref: menuRef, 'aria-labelledby': toggleNode && toggleNode.id }; var childArgs = { show: show, alignEnd: alignEnd, close: this.handleClose }; if (!usePopper) { menu = this.props.children(_extends({}, childArgs, { props: menuProps })); } else if (this.popperIsInitialized || show) { // Add it this way, so it doesn't override someone's usage // with react-popper if (toggleNode) popperConfig.referenceElement = toggleNode; menu = _react.default.createElement(_reactPopper.Popper, _extends({}, popperConfig, { innerRef: menuRef, placement: placement, eventsEnabled: !!show, modifiers: _extends({ flip: { enabled: !!flip } }, popperConfig.modifiers) }), function (_ref) { var ref = _ref.ref, style = _ref.style, popper = _objectWithoutPropertiesLoose(_ref, ["ref", "style"]); _this2.scheduleUpdate = popper.scheduleUpdate; return _this2.props.children(_extends({}, popper, childArgs, { props: _extends({}, menuProps, { ref: ref, style: style }) })); }); } return menu && _react.default.createElement(_RootCloseWrapper.default, { disabled: !show, event: rootCloseEvent, onRootClose: this.handleClose }, menu); }; return DropdownMenu; }(_react.default.Component); DropdownMenu.displayName = 'ReactOverlaysDropdownMenu'; DropdownMenu.propTypes = { /** * A render prop that returns a Menu element. The `props` * argument should spread through to **a component that can accept a ref**. * * @type {Function ({ * show: boolean, * alignEnd: boolean, * close: (?SyntheticEvent) => void, * placement: Placement, * outOfBoundaries: ?boolean, * scheduleUpdate: () => void, * props: { * ref: (?HTMLElement) => void, * style: { [string]: string | number }, * aria-labelledby: ?string * }, * arrowProps: { * ref: (?HTMLElement) => void, * style: { [string]: string | number }, * }, * }) => React.Element} */ children: _propTypes.default.func.isRequired, /** * Controls the visible state of the menu; generally this is * provided by the parent `Dropdown` component, * but may also be specified as a prop directly. */ show: _propTypes.default.bool, /** * Aligns the dropdown menu to the 'end' of its placement position. * Generally this is provided by the parent `Dropdown` component, * but may also be specified as a prop directly. */ alignEnd: _propTypes.default.bool, /** * Enables the Popper.js `flip` modifier, allowing the Dropdown to * automatically adjust its placement in case of overlap with the viewport or toggle. * Refer to the [flip docs](https://popper.js.org/popper-documentation.html#modifiers..flip.enabled) for more info */ flip: _propTypes.default.bool, usePopper: _propTypes.default.oneOf([true, false]), /** * A set of popper options and props passed directly to react-popper's Popper component. */ popperConfig: _propTypes.default.object, /** * Override the default event used by RootCloseWrapper. */ rootCloseEvent: _propTypes.default.string, /** @private */ onToggle: _propTypes.default.func, /** @private */ menuRef: _propTypes.default.func, /** @private */ drop: _propTypes.default.string, /** @private */ toggleNode: _propTypes.default.any }; DropdownMenu.defaultProps = { usePopper: true }; var DecoratedDropdownMenu = (0, _mapContextToProps.default)(_DropdownContext.default, function (_ref2, props) { var show = _ref2.show, alignEnd = _ref2.alignEnd, toggle = _ref2.toggle, drop = _ref2.drop, menuRef = _ref2.menuRef, toggleNode = _ref2.toggleNode; return { drop: drop, menuRef: menuRef, toggleNode: toggleNode, onToggle: toggle, show: show == null ?
props.show : show, alignEnd: alignEnd == null ? props.alignEnd : alignEnd }; }, DropdownMenu); var _default = DecoratedDropdownMenu; exports.default = _default; module.exports = exports.default; /***/ }), /* 36 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = void 0; var _propTypes = _interopRequireDefault(__webpack_require__(0)); var _react = _interopRequireDefault(__webpack_require__(1)); var _DropdownContext = _interopRequireDefault(__webpack_require__(29)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } var propTypes = { /** * A render prop that returns a Toggle element. The `props` * argument should spread through to **a component that can accept a ref**. Use * the `onToggle` argument to toggle the menu open or closed * * @type {Function ({ * show: boolean, * toggle: (show: boolean) => void, * props: { * ref: (?HTMLElement) => void, * aria-haspopup: true * aria-expanded: boolean * }, * }) => React.Element} */ children: _propTypes.default.func.isRequired }; function DropdownToggle(_ref) { var children = _ref.children; return _react.default.createElement(_DropdownContext.default.Consumer, null, function (_ref2) { var show = _ref2.show, toggle = _ref2.toggle, toggleRef = _ref2.toggleRef; return children({ show: show, toggle: toggle, props: { ref: toggleRef, 'aria-haspopup': true, 'aria-expanded': !!show } }); }); } DropdownToggle.displayName = 'ReactOverlaysDropdownToggle'; DropdownToggle.propTypes = propTypes; var _default = DropdownToggle; exports.default = _default; module.exports = exports.default; /***/ }), /* 37 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = void 0; var _class = _interopRequireDefault(__webpack_require__(80)); var _style = _interopRequireDefault(__webpack_require__(11)); var _scrollbarSize = _interopRequireDefault(__webpack_require__(18)); var _isOverflowing = _interopRequireDefault(__webpack_require__(83)); var _manageAriaHidden = __webpack_require__(85); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function findIndexOf(arr, cb) { var idx = -1; arr.some(function (d, i) { if (cb(d, i)) { idx = i; return true; } }); return idx; } /** * Proper state management for containers and the modals in those containers. * * @internal Used by the Modal to ensure proper styling of containers. */ var ModalManager = /*#__PURE__*/ function () { function ModalManager(_temp) { var _ref = _temp === void 0 ? {} : _temp, _ref$hideSiblingNodes = _ref.hideSiblingNodes, hideSiblingNodes = _ref$hideSiblingNodes === void 0 ? true : _ref$hideSiblingNodes, _ref$handleContainerO = _ref.handleContainerOverflow, handleContainerOverflow = _ref$handleContainerO === void 0 ?
true : _ref$handleContainerO; this.hideSiblingNodes = hideSiblingNodes; this.handleContainerOverflow = handleContainerOverflow; this.modals = []; this.containers = []; this.data = []; this.scrollbarSize = (0, _scrollbarSize.default)(); } var _proto = ModalManager.prototype; _proto.isContainerOverflowing = function isContainerOverflowing(modal) { var data = this.data[this.containerIndexFromModal(modal)]; return data && data.overflowing; }; _proto.containerIndexFromModal = function containerIndexFromModal(modal) { return findIndexOf(this.data, function (d) { return d.modals.indexOf(modal) !== -1; }); }; _proto.setContainerStyle = function setContainerStyle(containerState, container) { var style = { overflow: 'hidden' // we are only interested in the actual `style` here // because we will override it }; containerState.style = { overflow: container.style.overflow, paddingRight: container.style.paddingRight }; if (containerState.overflowing) { // use computed style, here to get the real padding // to add our scrollbar width style.paddingRight = parseInt((0, _style.default)(container, 'paddingRight') || 0, 10) + this.scrollbarSize + "px"; } (0, _style.default)(container, style); }; _proto.removeContainerStyle = function removeContainerStyle(containerState, container) { var style = containerState.style; Object.keys(style).forEach(function (key) { container.style[key] = style[key]; }); }; _proto.add = function add(modal, container, className) { var modalIdx = this.modals.indexOf(modal); var containerIdx = this.containers.indexOf(container); if (modalIdx !== -1) { return modalIdx; } modalIdx = this.modals.length; this.modals.push(modal); if (this.hideSiblingNodes) { (0, _manageAriaHidden.hideSiblings)(container, modal); } if (containerIdx !== -1) { this.data[containerIdx].modals.push(modal); return modalIdx; } var data = { modals: [modal], // right now only the first modal of a container will have its classes applied classes: className ?
className.split(/\s+/) : [], overflowing: (0, _isOverflowing.default)(container) }; if (this.handleContainerOverflow) { this.setContainerStyle(data, container); } data.classes.forEach(_class.default.addClass.bind(null, container)); this.containers.push(container); this.data.push(data); return modalIdx; }; _proto.remove = function remove(modal) { var modalIdx = this.modals.indexOf(modal); if (modalIdx === -1) { return; } var containerIdx = this.containerIndexFromModal(modal); var data = this.data[containerIdx]; var container = this.containers[containerIdx]; data.modals.splice(data.modals.indexOf(modal), 1); this.modals.splice(modalIdx, 1); // if that was the last modal in a container, // clean up the container if (data.modals.length === 0) { data.classes.forEach(_class.default.removeClass.bind(null, container)); if (this.handleContainerOverflow) { this.removeContainerStyle(data, container); } if (this.hideSiblingNodes) { (0, _manageAriaHidden.showSiblings)(container, modal); } this.containers.splice(containerIdx, 1); this.data.splice(containerIdx, 1); } else if (this.hideSiblingNodes) { //otherwise make sure the next top modal is visible to a SR var _data$modals = data.modals[data.modals.length - 1], backdrop = _data$modals.backdrop, dialog = _data$modals.dialog; (0, _manageAriaHidden.ariaHidden)(false, dialog); (0, _manageAriaHidden.ariaHidden)(false, backdrop); } }; _proto.isTopModal = function isTopModal(modal) { return !!this.modals.length && this.modals[this.modals.length - 1] === modal; }; return ModalManager; }(); var _default = ModalManager; exports.default = _default; module.exports = exports.default; /***/ }), /* 38 */ /***/ (function(module, __webpack_exports__, __webpack_require__) { "use strict"; /* WEBPACK VAR INJECTION */(function(global) {/**! * @fileOverview Kickass library to create and place poppers near their reference elements. * @version 1.15.0 * @license * Copyright (c) 2016 Federico Zivolo and contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ var isBrowser = typeof window !== 'undefined' && typeof document !== 'undefined'; var longerTimeoutBrowsers = ['Edge', 'Trident', 'Firefox']; var timeoutDuration = 0; for (var i = 0; i < longerTimeoutBrowsers.length; i += 1) { if (isBrowser && navigator.userAgent.indexOf(longerTimeoutBrowsers[i]) >= 0) { timeoutDuration = 1; break; } } function microtaskDebounce(fn) { var called = false; return function () { if (called) { return; } called = true; window.Promise.resolve().then(function () { called = false; fn(); }); }; } function taskDebounce(fn) { var scheduled = false; return function () { if (!scheduled) { scheduled = true; setTimeout(function () { scheduled = false; fn(); }, timeoutDuration); } }; } var supportsMicroTasks = isBrowser && window.Promise; /** * Create a debounced version of a method, that's asynchronously deferred * but called in the minimum time possible. * * @method * @memberof Popper.Utils * @argument {Function} fn * @returns {Function} */ var debounce = supportsMicroTasks ? microtaskDebounce : taskDebounce; /** * Check if the given variable is a function * @method * @memberof Popper.Utils * @argument {Any} functionToCheck - variable to check * @returns {Boolean} answer to: is a function? */ function isFunction(functionToCheck) { var getType = {}; return functionToCheck && getType.toString.call(functionToCheck) === '[object Function]'; } /** * Get CSS computed property of the given element * @method * @memberof Popper.Utils * @argument {Eement} element * @argument {String} property */ function getStyleComputedProperty(element, property) { if (element.nodeType !== 1) { return []; } // NOTE: 1 DOM access here var window = element.ownerDocument.defaultView; var css = window.getComputedStyle(element, null); return property ? css[property] : css; } /** * Returns the parentNode or the host of the element * @method * @memberof Popper.Utils * @argument {Element} element * @returns {Element} parent */ function getParentNode(element) { if (element.nodeName === 'HTML') { return element; } return element.parentNode || element.host; } /** * Returns the scrolling parent of the given element * @method * @memberof Popper.Utils * @argument {Element} element * @returns {Element} scroll parent */ function getScrollParent(element) { // Return body, `getScroll` will take care to get the correct `scrollTop` from it if (!element) { return document.body; } switch (element.nodeName) { case 'HTML': case 'BODY': return element.ownerDocument.body; case '#document': return element.body; } // Firefox want us to check `-x` and `-y` variations as well var _getStyleComputedProp = getStyleComputedProperty(element), overflow = _getStyleComputedProp.overflow, overflowX = _getStyleComputedProp.overflowX, overflowY = _getStyleComputedProp.overflowY; if (/(auto|scroll|overlay)/.test(overflow + overflowY + overflowX)) { return element; } return getScrollParent(getParentNode(element)); } var isIE11 = isBrowser && !!(window.MSInputMethodContext && document.documentMode); var isIE10 = isBrowser && /MSIE 10/.test(navigator.userAgent); /** * Determines if the browser is Internet Explorer * @method * @memberof Popper.Utils * @param {Number} version to check * @returns {Boolean} isIE */ function isIE(version) { if (version === 11) { return isIE11; } if (version === 10) { return isIE10; } return isIE11 || isIE10; } /** * Returns the offset parent of the given element * @method * @memberof Popper.Utils * @argument {Element} element * @returns {Element} offset parent */ function getOffsetParent(element) { if 
(!element) { return document.documentElement; } var noOffsetParent = isIE(10) ? document.body : null; // NOTE: 1 DOM access here var offsetParent = element.offsetParent || null; // Skip hidden elements which don't have an offsetParent while (offsetParent === noOffsetParent && element.nextElementSibling) { offsetParent = (element = element.nextElementSibling).offsetParent; } var nodeName = offsetParent && offsetParent.nodeName; if (!nodeName || nodeName === 'BODY' || nodeName === 'HTML') { return element ? element.ownerDocument.documentElement : document.documentElement; } // .offsetParent will return the closest TH, TD or TABLE in case // no offsetParent is present, I hate this job... if (['TH', 'TD', 'TABLE'].indexOf(offsetParent.nodeName) !== -1 && getStyleComputedProperty(offsetParent, 'position') === 'static') { return getOffsetParent(offsetParent); } return offsetParent; } function isOffsetContainer(element) { var nodeName = element.nodeName; if (nodeName === 'BODY') { return false; } return nodeName === 'HTML' || getOffsetParent(element.firstElementChild) === element; } /** * Finds the root node (document, shadowDOM root) of the given element * @method * @memberof Popper.Utils * @argument {Element} node * @returns {Element} root node */ function getRoot(node) { if (node.parentNode !== null) { return getRoot(node.parentNode); } return node; } /** * Finds the offset parent common to the two provided nodes * @method * @memberof Popper.Utils * @argument {Element} element1 * @argument {Element} element2 * @returns {Element} common offset parent */ function findCommonOffsetParent(element1, element2) { // This check is needed to avoid errors in case one of the elements isn't defined for any reason if (!element1 || !element1.nodeType || !element2 || !element2.nodeType) { return document.documentElement; } // Here we make sure to give as "start" the element that comes first in the DOM var order = element1.compareDocumentPosition(element2) & Node.DOCUMENT_POSITION_FOLLOWING; var start = order ? element1 : element2; var end = order ? element2 : element1; // Get common ancestor container var range = document.createRange(); range.setStart(start, 0); range.setEnd(end, 0); var commonAncestorContainer = range.commonAncestorContainer; // Both nodes are inside #document if (element1 !== commonAncestorContainer && element2 !== commonAncestorContainer || start.contains(end)) { if (isOffsetContainer(commonAncestorContainer)) { return commonAncestorContainer; } return getOffsetParent(commonAncestorContainer); } // one of the nodes is inside shadowDOM, find which one var element1root = getRoot(element1); if (element1root.host) { return findCommonOffsetParent(element1root.host, element2); } else { return findCommonOffsetParent(element1, getRoot(element2).host); } } /** * Gets the scroll value of the given element in the given side (top and left) * @method * @memberof Popper.Utils * @argument {Element} element * @argument {String} side `top` or `left` * @returns {number} amount of scrolled pixels */ function getScroll(element) { var side = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'top'; var upperSide = side === 'top' ? 
'scrollTop' : 'scrollLeft'; var nodeName = element.nodeName; if (nodeName === 'BODY' || nodeName === 'HTML') { var html = element.ownerDocument.documentElement; var scrollingElement = element.ownerDocument.scrollingElement || html; return scrollingElement[upperSide]; } return element[upperSide]; } /* * Sum or subtract the element scroll values (left and top) from a given rect object * @method * @memberof Popper.Utils * @param {Object} rect - Rect object you want to change * @param {HTMLElement} element - The element from which the function reads the scroll values * @param {Boolean} subtract - set to true if you want to subtract the scroll values * @return {Object} rect - The modified rect object */ function includeScroll(rect, element) { var subtract = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false; var scrollTop = getScroll(element, 'top'); var scrollLeft = getScroll(element, 'left'); var modifier = subtract ? -1 : 1; rect.top += scrollTop * modifier; rect.bottom += scrollTop * modifier; rect.left += scrollLeft * modifier; rect.right += scrollLeft * modifier; return rect; } /* * Helper to detect borders of a given element * @method * @memberof Popper.Utils * @param {CSSStyleDeclaration} styles * Result of `getStyleComputedProperty` on the given element * @param {String} axis - `x` or `y` * @return {number} borders - The borders size of the given axis */ function getBordersSize(styles, axis) { var sideA = axis === 'x' ? 'Left' : 'Top'; var sideB = sideA === 'Left' ? 'Right' : 'Bottom'; return parseFloat(styles['border' + sideA + 'Width'], 10) + parseFloat(styles['border' + sideB + 'Width'], 10); } function getSize(axis, body, html, computedStyle) { return Math.max(body['offset' + axis], body['scroll' + axis], html['client' + axis], html['offset' + axis], html['scroll' + axis], isIE(10) ? parseInt(html['offset' + axis]) + parseInt(computedStyle['margin' + (axis === 'Height' ? 'Top' : 'Left')]) + parseInt(computedStyle['margin' + (axis === 'Height' ?
'Bottom' : 'Right')]) : 0); } function getWindowSizes(document) { var body = document.body; var html = document.documentElement; var computedStyle = isIE(10) && getComputedStyle(html); return { height: getSize('Height', body, html, computedStyle), width: getSize('Width', body, html, computedStyle) }; } var classCallCheck = function (instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }; var createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); var defineProperty = function (obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }; var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; /** * Given element offsets, generate an output similar to getBoundingClientRect * @method * @memberof Popper.Utils * @argument {Object} offsets * @returns {Object} ClientRect like output */ function getClientRect(offsets) { return _extends({}, offsets, { right: offsets.left + offsets.width, bottom: offsets.top + offsets.height }); } /** * Get bounding client rect of given element * @method * @memberof Popper.Utils * @param {HTMLElement} element * @return {Object} client rect */ function getBoundingClientRect(element) { var rect = {}; // IE10 10 FIX: Please, don't ask, the element isn't // considered in DOM in some circumstances... // This isn't reproducible in IE10 compatibility mode of IE11 try { if (isIE(10)) { rect = element.getBoundingClientRect(); var scrollTop = getScroll(element, 'top'); var scrollLeft = getScroll(element, 'left'); rect.top += scrollTop; rect.left += scrollLeft; rect.bottom += scrollTop; rect.right += scrollLeft; } else { rect = element.getBoundingClientRect(); } } catch (e) {} var result = { left: rect.left, top: rect.top, width: rect.right - rect.left, height: rect.bottom - rect.top }; // subtract scrollbar size from sizes var sizes = element.nodeName === 'HTML' ? 
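/*
 * Worked example for the `getClientRect` helper above (illustrative only):
 * `right` and `bottom` are derived from left/top plus the size, mirroring
 * what a native getBoundingClientRect would report:
 *
 *   getClientRect({ top: 10, left: 20, width: 100, height: 50 });
 *   // => { top: 10, left: 20, width: 100, height: 50, right: 120, bottom: 60 }
 */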
getWindowSizes(element.ownerDocument) : {}; var width = sizes.width || element.clientWidth || result.right - result.left; var height = sizes.height || element.clientHeight || result.bottom - result.top; var horizScrollbar = element.offsetWidth - width; var vertScrollbar = element.offsetHeight - height; // if a hypothetical scrollbar is detected, we must be sure it's not a `border` // we make this check conditional for performance reasons if (horizScrollbar || vertScrollbar) { var styles = getStyleComputedProperty(element); horizScrollbar -= getBordersSize(styles, 'x'); vertScrollbar -= getBordersSize(styles, 'y'); result.width -= horizScrollbar; result.height -= vertScrollbar; } return getClientRect(result); } function getOffsetRectRelativeToArbitraryNode(children, parent) { var fixedPosition = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false; var isIE10 = isIE(10); var isHTML = parent.nodeName === 'HTML'; var childrenRect = getBoundingClientRect(children); var parentRect = getBoundingClientRect(parent); var scrollParent = getScrollParent(children); var styles = getStyleComputedProperty(parent); var borderTopWidth = parseFloat(styles.borderTopWidth, 10); var borderLeftWidth = parseFloat(styles.borderLeftWidth, 10); // In cases where the parent is fixed, we must ignore negative scroll in offset calc if (fixedPosition && isHTML) { parentRect.top = Math.max(parentRect.top, 0); parentRect.left = Math.max(parentRect.left, 0); } var offsets = getClientRect({ top: childrenRect.top - parentRect.top - borderTopWidth, left: childrenRect.left - parentRect.left - borderLeftWidth, width: childrenRect.width, height: childrenRect.height }); offsets.marginTop = 0; offsets.marginLeft = 0; // Subtract margins of documentElement in case it's being used as parent // we do this only on HTML because it's the only element that behaves // differently when margins are applied to it. The margins are included in // the box of the documentElement; in the other cases they are not. if (!isIE10 && isHTML) { var marginTop = parseFloat(styles.marginTop, 10); var marginLeft = parseFloat(styles.marginLeft, 10); offsets.top -= borderTopWidth - marginTop; offsets.bottom -= borderTopWidth - marginTop; offsets.left -= borderLeftWidth - marginLeft; offsets.right -= borderLeftWidth - marginLeft; // Attach marginTop and marginLeft because in some circumstances we may need them offsets.marginTop = marginTop; offsets.marginLeft = marginLeft; } if (isIE10 && !fixedPosition ? parent.contains(scrollParent) : parent === scrollParent && scrollParent.nodeName !== 'BODY') { offsets = includeScroll(offsets, parent); } return offsets; } function getViewportOffsetRectRelativeToArtbitraryNode(element) { var excludeScroll = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false; var html = element.ownerDocument.documentElement; var relativeOffset = getOffsetRectRelativeToArbitraryNode(element, html); var width = Math.max(html.clientWidth, window.innerWidth || 0); var height = Math.max(html.clientHeight, window.innerHeight || 0); var scrollTop = !excludeScroll ? getScroll(html) : 0; var scrollLeft = !excludeScroll ?
getScroll(html, 'left') : 0; var offset = { top: scrollTop - relativeOffset.top + relativeOffset.marginTop, left: scrollLeft - relativeOffset.left + relativeOffset.marginLeft, width: width, height: height }; return getClientRect(offset); } /** * Check if the given element is fixed or is inside a fixed parent * @method * @memberof Popper.Utils * @argument {Element} element * @argument {Element} customContainer * @returns {Boolean} answer to "isFixed?" */ function isFixed(element) { var nodeName = element.nodeName; if (nodeName === 'BODY' || nodeName === 'HTML') { return false; } if (getStyleComputedProperty(element, 'position') === 'fixed') { return true; } var parentNode = getParentNode(element); if (!parentNode) { return false; } return isFixed(parentNode); } /** * Finds the first parent of an element that has a transform property defined * @method * @memberof Popper.Utils * @argument {Element} element * @returns {Element} first transformed parent or documentElement */ function getFixedPositionOffsetParent(element) { // This check is needed to avoid errors in case one of the elements isn't defined for any reason if (!element || !element.parentElement || isIE()) { return document.documentElement; } var el = element.parentElement; while (el && getStyleComputedProperty(el, 'transform') === 'none') { el = el.parentElement; } return el || document.documentElement; } /** * Computes the boundaries limits and returns them * @method * @memberof Popper.Utils * @param {HTMLElement} popper * @param {HTMLElement} reference * @param {number} padding * @param {HTMLElement} boundariesElement - Element used to define the boundaries * @param {Boolean} fixedPosition - Is in fixed position mode * @returns {Object} Coordinates of the boundaries */ function getBoundaries(popper, reference, padding, boundariesElement) { var fixedPosition = arguments.length > 4 && arguments[4] !== undefined ? arguments[4] : false; // NOTE: 1 DOM access here var boundaries = { top: 0, left: 0 }; var offsetParent = fixedPosition ? getFixedPositionOffsetParent(popper) : findCommonOffsetParent(popper, reference); // Handle viewport case if (boundariesElement === 'viewport') { boundaries = getViewportOffsetRectRelativeToArtbitraryNode(offsetParent, fixedPosition); } else { // Handle other cases based on DOM element used as boundaries var boundariesNode = void 0; if (boundariesElement === 'scrollParent') { boundariesNode = getScrollParent(getParentNode(reference)); if (boundariesNode.nodeName === 'BODY') { boundariesNode = popper.ownerDocument.documentElement; } } else if (boundariesElement === 'window') { boundariesNode = popper.ownerDocument.documentElement; } else { boundariesNode = boundariesElement; } var offsets = getOffsetRectRelativeToArbitraryNode(boundariesNode, offsetParent, fixedPosition); // In case of HTML, we need a different computation if (boundariesNode.nodeName === 'HTML' && !isFixed(offsetParent)) { var _getWindowSizes = getWindowSizes(popper.ownerDocument), height = _getWindowSizes.height, width = _getWindowSizes.width; boundaries.top += offsets.top - offsets.marginTop; boundaries.bottom = height + offsets.top; boundaries.left += offsets.left - offsets.marginLeft; boundaries.right = width + offsets.left; } else { // for all the other DOM elements, this one is good boundaries = offsets; } } // Add paddings padding = padding || 0; var isPaddingNumber = typeof padding === 'number'; boundaries.left += isPaddingNumber ? padding : padding.left || 0; boundaries.top += isPaddingNumber ?
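/*
 * Illustrative note on the padding handling here (not part of the library):
 * a numeric padding shrinks the usable area evenly, while an object pads per
 * side. `popperNode` and `referenceNode` stand for the popper and reference
 * DOM elements already managed by the caller:
 *
 *   getBoundaries(popperNode, referenceNode, 5, 'viewport');         // 5px on all sides
 *   getBoundaries(popperNode, referenceNode, { top: 10 }, 'window'); // 10px on top only
 */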
padding : padding.top || 0; boundaries.right -= isPaddingNumber ? padding : padding.right || 0; boundaries.bottom -= isPaddingNumber ? padding : padding.bottom || 0; return boundaries; } function getArea(_ref) { var width = _ref.width, height = _ref.height; return width * height; } /** * Utility used to transform the `auto` placement to the placement with more * available space. * @method * @memberof Popper.Utils * @argument {Object} data - The data object generated by update method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function computeAutoPlacement(placement, refRect, popper, reference, boundariesElement) { var padding = arguments.length > 5 && arguments[5] !== undefined ? arguments[5] : 0; if (placement.indexOf('auto') === -1) { return placement; } var boundaries = getBoundaries(popper, reference, padding, boundariesElement); var rects = { top: { width: boundaries.width, height: refRect.top - boundaries.top }, right: { width: boundaries.right - refRect.right, height: boundaries.height }, bottom: { width: boundaries.width, height: boundaries.bottom - refRect.bottom }, left: { width: refRect.left - boundaries.left, height: boundaries.height } }; var sortedAreas = Object.keys(rects).map(function (key) { return _extends({ key: key }, rects[key], { area: getArea(rects[key]) }); }).sort(function (a, b) { return b.area - a.area; }); var filteredAreas = sortedAreas.filter(function (_ref2) { var width = _ref2.width, height = _ref2.height; return width >= popper.clientWidth && height >= popper.clientHeight; }); var computedPlacement = filteredAreas.length > 0 ? filteredAreas[0].key : sortedAreas[0].key; var variation = placement.split('-')[1]; return computedPlacement + (variation ? '-' + variation : ''); } /** * Get offsets to the reference element * @method * @memberof Popper.Utils * @param {Object} state * @param {Element} popper - the popper element * @param {Element} reference - the reference element (the popper will be relative to this) * @param {Element} fixedPosition - is in fixed position mode * @returns {Object} An object containing the offsets which will be applied to the popper */ function getReferenceOffsets(state, popper, reference) { var fixedPosition = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : null; var commonOffsetParent = fixedPosition ? 
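/*
 * Sketch of `computeAutoPlacement` behavior (illustrative only): non-auto
 * placements pass through untouched, while `auto` resolves to the side with
 * the largest available area, preserving any variation suffix. `refRect`,
 * `popperNode` and `referenceNode` are assumed caller-provided values:
 *
 *   computeAutoPlacement('top', refRect, popperNode, referenceNode, 'viewport');
 *   // => 'top'
 *   computeAutoPlacement('auto-start', refRect, popperNode, referenceNode, 'viewport');
 *   // => e.g. 'bottom-start' when most free space is below the reference
 */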
getFixedPositionOffsetParent(popper) : findCommonOffsetParent(popper, reference); return getOffsetRectRelativeToArbitraryNode(reference, commonOffsetParent, fixedPosition); } /** * Get the outer sizes of the given element (offset size + margins) * @method * @memberof Popper.Utils * @argument {Element} element * @returns {Object} object containing width and height properties */ function getOuterSizes(element) { var window = element.ownerDocument.defaultView; var styles = window.getComputedStyle(element); var x = parseFloat(styles.marginTop || 0) + parseFloat(styles.marginBottom || 0); var y = parseFloat(styles.marginLeft || 0) + parseFloat(styles.marginRight || 0); var result = { width: element.offsetWidth + y, height: element.offsetHeight + x }; return result; } /** * Get the opposite placement of the given one * @method * @memberof Popper.Utils * @argument {String} placement * @returns {String} flipped placement */ function getOppositePlacement(placement) { var hash = { left: 'right', right: 'left', bottom: 'top', top: 'bottom' }; return placement.replace(/left|right|bottom|top/g, function (matched) { return hash[matched]; }); } /** * Get offsets to the popper * @method * @memberof Popper.Utils * @param {Object} position - CSS position the Popper will get applied * @param {HTMLElement} popper - the popper element * @param {Object} referenceOffsets - the reference offsets (the popper will be relative to this) * @param {String} placement - one of the valid placement options * @returns {Object} popperOffsets - An object containing the offsets which will be applied to the popper */ function getPopperOffsets(popper, referenceOffsets, placement) { placement = placement.split('-')[0]; // Get popper node sizes var popperRect = getOuterSizes(popper); // Add position, width and height to our offsets object var popperOffsets = { width: popperRect.width, height: popperRect.height }; // depending on the popper placement we have to compute its offsets slightly differently var isHoriz = ['right', 'left'].indexOf(placement) !== -1; var mainSide = isHoriz ? 'top' : 'left'; var secondarySide = isHoriz ? 'left' : 'top'; var measurement = isHoriz ? 'height' : 'width'; var secondaryMeasurement = !isHoriz ?
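/*
 * Quick examples for `getOppositePlacement` above (illustrative only); only
 * the base side is flipped, any variation suffix is left untouched:
 *
 *   getOppositePlacement('top');        // => 'bottom'
 *   getOppositePlacement('left-start'); // => 'right-start'
 */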
'height' : 'width'; popperOffsets[mainSide] = referenceOffsets[mainSide] + referenceOffsets[measurement] / 2 - popperRect[measurement] / 2; if (placement === secondarySide) { popperOffsets[secondarySide] = referenceOffsets[secondarySide] - popperRect[secondaryMeasurement]; } else { popperOffsets[secondarySide] = referenceOffsets[getOppositePlacement(secondarySide)]; } return popperOffsets; } /** * Mimics the `find` method of Array * @method * @memberof Popper.Utils * @argument {Array} arr * @argument {Function} check * @returns the first matching element, or undefined */ function find(arr, check) { // use native find if supported if (Array.prototype.find) { return arr.find(check); } // use `filter` to obtain the same behavior of `find` return arr.filter(check)[0]; } /** * Return the index of the matching object * @method * @memberof Popper.Utils * @argument {Array} arr * @argument prop * @argument value * @returns index or -1 */ function findIndex(arr, prop, value) { // use native findIndex if supported if (Array.prototype.findIndex) { return arr.findIndex(function (cur) { return cur[prop] === value; }); } // use `find` + `indexOf` if `findIndex` isn't supported var match = find(arr, function (obj) { return obj[prop] === value; }); return arr.indexOf(match); } /** * Loop through the list of modifiers and run them in order; * each of them will then edit the data object. * @method * @memberof Popper.Utils * @param {dataObject} data * @param {Array} modifiers * @param {String} ends - Optional modifier name used as stopper * @returns {dataObject} */ function runModifiers(modifiers, data, ends) { var modifiersToRun = ends === undefined ? modifiers : modifiers.slice(0, findIndex(modifiers, 'name', ends)); modifiersToRun.forEach(function (modifier) { if (modifier['function']) { // eslint-disable-line dot-notation console.warn('`modifier.function` is deprecated, use `modifier.fn`!'); } var fn = modifier['function'] || modifier.fn; // eslint-disable-line dot-notation if (modifier.enabled && isFunction(fn)) { // Add properties to offsets to make them a complete clientRect object // we do this before each modifier to make sure the previous one doesn't // mess with these values data.offsets.popper = getClientRect(data.offsets.popper); data.offsets.reference = getClientRect(data.offsets.reference); data = fn(data, modifier); } }); return data; } /** * Updates the position of the popper, computing the new offsets and applying * the new style.
* Prefer `scheduleUpdate` over `update` for performance reasons. * @method * @memberof Popper */ function update() { // if popper is destroyed, don't perform any further update if (this.state.isDestroyed) { return; } var data = { instance: this, styles: {}, arrowStyles: {}, attributes: {}, flipped: false, offsets: {} }; // compute reference element offsets data.offsets.reference = getReferenceOffsets(this.state, this.popper, this.reference, this.options.positionFixed); // compute auto placement, store placement inside the data object, // modifiers will be able to edit `placement` if needed // and refer to originalPlacement to know the original value data.placement = computeAutoPlacement(this.options.placement, data.offsets.reference, this.popper, this.reference, this.options.modifiers.flip.boundariesElement, this.options.modifiers.flip.padding); // store the computed placement inside `originalPlacement` data.originalPlacement = data.placement; data.positionFixed = this.options.positionFixed; // compute the popper offsets data.offsets.popper = getPopperOffsets(this.popper, data.offsets.reference, data.placement); data.offsets.popper.position = this.options.positionFixed ? 'fixed' : 'absolute'; // run the modifiers data = runModifiers(this.modifiers, data); // the first `update` will call `onCreate` callback // the other ones will call `onUpdate` callback if (!this.state.isCreated) { this.state.isCreated = true; this.options.onCreate(data); } else { this.options.onUpdate(data); } } /** * Helper used to know if the given modifier is enabled. * @method * @memberof Popper.Utils * @returns {Boolean} */ function isModifierEnabled(modifiers, modifierName) { return modifiers.some(function (_ref) { var name = _ref.name, enabled = _ref.enabled; return enabled && name === modifierName; }); } /** * Get the prefixed supported property name * @method * @memberof Popper.Utils * @argument {String} property (camelCase) * @returns {String} prefixed property (camelCase or PascalCase, depending on the vendor prefix) */ function getSupportedPropertyName(property) { var prefixes = [false, 'ms', 'Webkit', 'Moz', 'O']; var upperProp = property.charAt(0).toUpperCase() + property.slice(1); for (var i = 0; i < prefixes.length; i++) { var prefix = prefixes[i]; var toCheck = prefix ? '' + prefix + upperProp : property; if (typeof document.body.style[toCheck] !== 'undefined') { return toCheck; } } return null; } /** * Destroys the popper. * @method * @memberof Popper */ function destroy() { this.state.isDestroyed = true; // touch DOM only if `applyStyle` modifier is enabled if (isModifierEnabled(this.modifiers, 'applyStyle')) { this.popper.removeAttribute('x-placement'); this.popper.style.position = ''; this.popper.style.top = ''; this.popper.style.left = ''; this.popper.style.right = ''; this.popper.style.bottom = ''; this.popper.style.willChange = ''; this.popper.style[getSupportedPropertyName('transform')] = ''; } this.disableEventListeners(); // remove the popper if the user explicitly asked for the deletion on destroy // do not use `remove` because IE11 doesn't support it if (this.options.removeOnDestroy) { this.popper.parentNode.removeChild(this.popper); } return this; } /** * Get the window associated with the element * @argument {Element} element * @returns {Window} */ function getWindow(element) { var ownerDocument = element.ownerDocument; return ownerDocument ?
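/*
 * Illustrative result of `getSupportedPropertyName` above: on an evergreen
 * browser the unprefixed property usually wins, on older engines a vendor
 * prefixed PascalCase variant is returned, and null means no support at all:
 *
 *   getSupportedPropertyName('transform');
 *   // => 'transform', or e.g. 'WebkitTransform' on older WebKit, or null
 */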
ownerDocument.defaultView : window; } function attachToScrollParents(scrollParent, event, callback, scrollParents) { var isBody = scrollParent.nodeName === 'BODY'; var target = isBody ? scrollParent.ownerDocument.defaultView : scrollParent; target.addEventListener(event, callback, { passive: true }); if (!isBody) { attachToScrollParents(getScrollParent(target.parentNode), event, callback, scrollParents); } scrollParents.push(target); } /** * Setup needed event listeners used to update the popper position * @method * @memberof Popper.Utils * @private */ function setupEventListeners(reference, options, state, updateBound) { // Resize event listener on window state.updateBound = updateBound; getWindow(reference).addEventListener('resize', state.updateBound, { passive: true }); // Scroll event listener on scroll parents var scrollElement = getScrollParent(reference); attachToScrollParents(scrollElement, 'scroll', state.updateBound, state.scrollParents); state.scrollElement = scrollElement; state.eventsEnabled = true; return state; } /** * It will add resize/scroll events and start recalculating * position of the popper element when they are triggered. * @method * @memberof Popper */ function enableEventListeners() { if (!this.state.eventsEnabled) { this.state = setupEventListeners(this.reference, this.options, this.state, this.scheduleUpdate); } } /** * Remove event listeners used to update the popper position * @method * @memberof Popper.Utils * @private */ function removeEventListeners(reference, state) { // Remove resize event listener on window getWindow(reference).removeEventListener('resize', state.updateBound); // Remove scroll event listener on scroll parents state.scrollParents.forEach(function (target) { target.removeEventListener('scroll', state.updateBound); }); // Reset state state.updateBound = null; state.scrollParents = []; state.scrollElement = null; state.eventsEnabled = false; return state; } /** * It will remove resize/scroll events and won't recalculate popper position * when they are triggered. It also won't trigger `onUpdate` callback anymore, * unless you call `update` method manually. 
* @method * @memberof Popper */ function disableEventListeners() { if (this.state.eventsEnabled) { cancelAnimationFrame(this.scheduleUpdate); this.state = removeEventListeners(this.reference, this.state); } } /** * Tells if a given input is a number * @method * @memberof Popper.Utils * @param {*} input to check * @return {Boolean} */ function isNumeric(n) { return n !== '' && !isNaN(parseFloat(n)) && isFinite(n); } /** * Set the style to the given popper * @method * @memberof Popper.Utils * @argument {Element} element - Element to apply the style to * @argument {Object} styles * Object with a list of properties and values which will be applied to the element */ function setStyles(element, styles) { Object.keys(styles).forEach(function (prop) { var unit = ''; // add unit if the value is numeric and is one of the following if (['width', 'height', 'top', 'right', 'bottom', 'left'].indexOf(prop) !== -1 && isNumeric(styles[prop])) { unit = 'px'; } element.style[prop] = styles[prop] + unit; }); } /** * Set the attributes to the given popper * @method * @memberof Popper.Utils * @argument {Element} element - Element to apply the attributes to * @argument {Object} styles * Object with a list of properties and values which will be applied to the element */ function setAttributes(element, attributes) { Object.keys(attributes).forEach(function (prop) { var value = attributes[prop]; if (value !== false) { element.setAttribute(prop, attributes[prop]); } else { element.removeAttribute(prop); } }); } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by `update` method * @argument {Object} data.styles - List of style properties - values to apply to popper element * @argument {Object} data.attributes - List of attribute properties - values to apply to popper element * @argument {Object} options - Modifiers configuration and options * @returns {Object} The same data object */ function applyStyle(data) { // any property present in `data.styles` will be applied to the popper, // in this way we can make the 3rd party modifiers add custom styles to it // Be aware, modifiers could override the properties defined in the previous // lines of this modifier! setStyles(data.instance.popper, data.styles); // any property present in `data.attributes` will be applied to the popper, // they will be set as HTML attributes of the element setAttributes(data.instance.popper, data.attributes); // if arrowElement is defined and arrowStyles has some properties if (data.arrowElement && Object.keys(data.arrowStyles).length) { setStyles(data.arrowElement, data.arrowStyles); } return data; } /** * Set the x-placement attribute before everything else because it could be used * to add margins to the popper margins needs to be calculated to get the * correct popper offsets. 
* @method * @memberof Popper.modifiers * @param {HTMLElement} reference - The reference element used to position the popper * @param {HTMLElement} popper - The HTML element used as popper * @param {Object} options - Popper.js options */ function applyStyleOnLoad(reference, popper, options, modifierOptions, state) { // compute reference element offsets var referenceOffsets = getReferenceOffsets(state, popper, reference, options.positionFixed); // compute auto placement, store placement inside the data object, // modifiers will be able to edit `placement` if needed // and refer to originalPlacement to know the original value var placement = computeAutoPlacement(options.placement, referenceOffsets, popper, reference, options.modifiers.flip.boundariesElement, options.modifiers.flip.padding); popper.setAttribute('x-placement', placement); // Apply `position` to popper before anything else because // without the position applied we can't guarantee correct computations setStyles(popper, { position: options.positionFixed ? 'fixed' : 'absolute' }); return options; } /** * @function * @memberof Popper.Utils * @argument {Object} data - The data object generated by `update` method * @argument {Boolean} shouldRound - If the offsets should be rounded at all * @returns {Object} The popper's position offsets rounded * * The tale of pixel-perfect positioning. It's still not 100% perfect, but as * good as it can be within reason. * Discussion here: https://github.com/FezVrasta/popper.js/pull/715 * * Low DPI screens cause a popper to be blurry if not using full pixels (Safari * as well on High DPI screens). * * Firefox prefers no rounding for positioning and does not have blurriness on * high DPI screens. * * Only horizontal placement and left/right values need to be considered. */ function getRoundedOffsets(data, shouldRound) { var _data$offsets = data.offsets, popper = _data$offsets.popper, reference = _data$offsets.reference; var round = Math.round, floor = Math.floor; var noRound = function noRound(v) { return v; }; var referenceWidth = round(reference.width); var popperWidth = round(popper.width); var isVertical = ['left', 'right'].indexOf(data.placement) !== -1; var isVariation = data.placement.indexOf('-') !== -1; var sameWidthParity = referenceWidth % 2 === popperWidth % 2; var bothOddWidth = referenceWidth % 2 === 1 && popperWidth % 2 === 1; var horizontalToInteger = !shouldRound ? noRound : isVertical || isVariation || sameWidthParity ? round : floor; var verticalToInteger = !shouldRound ? noRound : round; return { left: horizontalToInteger(bothOddWidth && !isVariation && shouldRound ? 
popper.left - 1 : popper.left), top: verticalToInteger(popper.top), bottom: verticalToInteger(popper.bottom), right: horizontalToInteger(popper.right) }; } var isFirefox = isBrowser && /Firefox/i.test(navigator.userAgent); /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by `update` method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function computeStyle(data, options) { var x = options.x, y = options.y; var popper = data.offsets.popper; // Remove this legacy support in Popper.js v2 var legacyGpuAccelerationOption = find(data.instance.modifiers, function (modifier) { return modifier.name === 'applyStyle'; }).gpuAcceleration; if (legacyGpuAccelerationOption !== undefined) { console.warn('WARNING: `gpuAcceleration` option moved to `computeStyle` modifier and will not be supported in future versions of Popper.js!'); } var gpuAcceleration = legacyGpuAccelerationOption !== undefined ? legacyGpuAccelerationOption : options.gpuAcceleration; var offsetParent = getOffsetParent(data.instance.popper); var offsetParentRect = getBoundingClientRect(offsetParent); // Styles var styles = { position: popper.position }; var offsets = getRoundedOffsets(data, window.devicePixelRatio < 2 || !isFirefox); var sideA = x === 'bottom' ? 'top' : 'bottom'; var sideB = y === 'right' ? 'left' : 'right'; // if gpuAcceleration is set to `true` and transform is supported, // we use `translate3d` to apply the position to the popper; we // automatically use the supported prefixed version if needed var prefixedProperty = getSupportedPropertyName('transform'); // now, let's take a step back and look at this code closely (wtf?) // If the content of the popper grows once it's been positioned, it // may happen that the popper gets misplaced because of the new content // overflowing its reference element // To avoid this problem, we provide two options (x and y), which allow // the consumer to define the offset origin. // If we position a popper on top of a reference element, we can set // `x` to `top` to make the popper grow towards its top instead of // its bottom. var left = void 0, top = void 0; if (sideA === 'bottom') { // when offsetParent is `<html>`, the positioning is relative to the bottom of the screen (excluding the scrollbar) // and not the bottom of the html element if (offsetParent.nodeName === 'HTML') { top = -offsetParent.clientHeight + offsets.bottom; } else { top = -offsetParentRect.height + offsets.bottom; } } else { top = offsets.top; } if (sideB === 'right') { if (offsetParent.nodeName === 'HTML') { left = -offsetParent.clientWidth + offsets.right; } else { left = -offsetParentRect.width + offsets.right; } } else { left = offsets.left; } if (gpuAcceleration && prefixedProperty) { styles[prefixedProperty] = 'translate3d(' + left + 'px, ' + top + 'px, 0)'; styles[sideA] = 0; styles[sideB] = 0; styles.willChange = 'transform'; } else { // otherwise, we use the standard `top`, `left`, `bottom` and `right` properties var invertTop = sideA === 'bottom' ? -1 : 1; var invertLeft = sideB === 'right' ?
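/*
 * Configuration sketch for the x/y offset origins handled above (illustrative
 * only): anchoring via `bottom` instead of `top` lets a popper that grows
 * after positioning expand upward rather than downward. `referenceNode` and
 * `popperNode` are assumed existing DOM elements:
 *
 *   new Popper(referenceNode, popperNode, {
 *     modifiers: { computeStyle: { x: 'top', gpuAcceleration: false } }
 *   });
 */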
-1 : 1; styles[sideA] = top * invertTop; styles[sideB] = left * invertLeft; styles.willChange = sideA + ', ' + sideB; } // Attributes var attributes = { 'x-placement': data.placement }; // Update `data` attributes, styles and arrowStyles data.attributes = _extends({}, attributes, data.attributes); data.styles = _extends({}, styles, data.styles); data.arrowStyles = _extends({}, data.offsets.arrow, data.arrowStyles); return data; } /** * Helper used to know if the given modifier depends on another one.
* It checks if the needed modifier is listed and enabled. * @method * @memberof Popper.Utils * @param {Array} modifiers - list of modifiers * @param {String} requestingName - name of requesting modifier * @param {String} requestedName - name of requested modifier * @returns {Boolean} */ function isModifierRequired(modifiers, requestingName, requestedName) { var requesting = find(modifiers, function (_ref) { var name = _ref.name; return name === requestingName; }); var isRequired = !!requesting && modifiers.some(function (modifier) { return modifier.name === requestedName && modifier.enabled && modifier.order < requesting.order; }); if (!isRequired) { var _requesting = '`' + requestingName + '`'; var requested = '`' + requestedName + '`'; console.warn(requested + ' modifier is required by ' + _requesting + ' modifier in order to work, be sure to include it before ' + _requesting + '!'); } return isRequired; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by update method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function arrow(data, options) { var _data$offsets$arrow; // arrow depends on keepTogether in order to work if (!isModifierRequired(data.instance.modifiers, 'arrow', 'keepTogether')) { return data; } var arrowElement = options.element; // if arrowElement is a string, suppose it's a CSS selector if (typeof arrowElement === 'string') { arrowElement = data.instance.popper.querySelector(arrowElement); // if arrowElement is not found, don't run the modifier if (!arrowElement) { return data; } } else { // if the arrowElement isn't a query selector we must check that the // provided DOM node is child of its popper node if (!data.instance.popper.contains(arrowElement)) { console.warn('WARNING: `arrow.element` must be child of its popper element!'); return data; } } var placement = data.placement.split('-')[0]; var _data$offsets = data.offsets, popper = _data$offsets.popper, reference = _data$offsets.reference; var isVertical = ['left', 'right'].indexOf(placement) !== -1; var len = isVertical ? 'height' : 'width'; var sideCapitalized = isVertical ? 'Top' : 'Left'; var side = sideCapitalized.toLowerCase(); var altSide = isVertical ? 'left' : 'top'; var opSide = isVertical ? 
'bottom' : 'right'; var arrowElementSize = getOuterSizes(arrowElement)[len]; // // extends keepTogether behavior making sure the popper and its // reference have enough pixels in conjunction // // top/left side if (reference[opSide] - arrowElementSize < popper[side]) { data.offsets.popper[side] -= popper[side] - (reference[opSide] - arrowElementSize); } // bottom/right side if (reference[side] + arrowElementSize > popper[opSide]) { data.offsets.popper[side] += reference[side] + arrowElementSize - popper[opSide]; } data.offsets.popper = getClientRect(data.offsets.popper); // compute center of the popper var center = reference[side] + reference[len] / 2 - arrowElementSize / 2; // Compute the sideValue using the updated popper offsets // take popper margin in account because we don't have this info available var css = getStyleComputedProperty(data.instance.popper); var popperMarginSide = parseFloat(css['margin' + sideCapitalized], 10); var popperBorderSide = parseFloat(css['border' + sideCapitalized + 'Width'], 10); var sideValue = center - data.offsets.popper[side] - popperMarginSide - popperBorderSide; // prevent arrowElement from being placed not contiguously to its popper sideValue = Math.max(Math.min(popper[len] - arrowElementSize, sideValue), 0); data.arrowElement = arrowElement; data.offsets.arrow = (_data$offsets$arrow = {}, defineProperty(_data$offsets$arrow, side, Math.round(sideValue)), defineProperty(_data$offsets$arrow, altSide, ''), _data$offsets$arrow); return data; } /** * Get the opposite placement variation of the given one * @method * @memberof Popper.Utils * @argument {String} placement variation * @returns {String} flipped placement variation */ function getOppositeVariation(variation) { if (variation === 'end') { return 'start'; } else if (variation === 'start') { return 'end'; } return variation; } /** * List of accepted placements to use as values of the `placement` option.
* Valid placements are: * - `auto` * - `top` * - `right` * - `bottom` * - `left` * * Each placement can have a variation from this list: * - `-start` * - `-end` * * Variations are easy to interpret if you think in terms of left-to-right * written languages. Horizontally (`top` and `bottom`), `start` is left and `end` * is right.
* Vertically (`left` and `right`), `start` is top and `end` is bottom. * * Some valid examples are: * - `top-end` (on top of reference, right aligned) * - `right-start` (on right of reference, top aligned) * - `bottom` (on bottom, centered) * - `auto-end` (on the side with more space available, alignment depends on placement) * * @static * @type {Array} * @enum {String} * @readonly * @method placements * @memberof Popper */ var placements = ['auto-start', 'auto', 'auto-end', 'top-start', 'top', 'top-end', 'right-start', 'right', 'right-end', 'bottom-end', 'bottom', 'bottom-start', 'left-end', 'left', 'left-start']; // Get rid of `auto`, `auto-start` and `auto-end` var validPlacements = placements.slice(3); /** * Given an initial placement, returns all the subsequent placements * clockwise (or counter-clockwise). * * @method * @memberof Popper.Utils * @argument {String} placement - A valid placement (it accepts variations) * @argument {Boolean} counter - Set to true to walk the placements counterclockwise * @returns {Array} placements including their variations */ function clockwise(placement) { var counter = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false; var index = validPlacements.indexOf(placement); var arr = validPlacements.slice(index + 1).concat(validPlacements.slice(0, index)); return counter ? arr.reverse() : arr; } var BEHAVIORS = { FLIP: 'flip', CLOCKWISE: 'clockwise', COUNTERCLOCKWISE: 'counterclockwise' }; /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by update method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function flip(data, options) { // if `inner` modifier is enabled, we can't use the `flip` modifier if (isModifierEnabled(data.instance.modifiers, 'inner')) { return data; } if (data.flipped && data.placement === data.originalPlacement) { // seems like flip is trying to loop, probably there's not enough space on any of the flippable sides return data; } var boundaries = getBoundaries(data.instance.popper, data.instance.reference, options.padding, options.boundariesElement, data.positionFixed); var placement = data.placement.split('-')[0]; var placementOpposite = getOppositePlacement(placement); var variation = data.placement.split('-')[1] || ''; var flipOrder = []; switch (options.behavior) { case BEHAVIORS.FLIP: flipOrder = [placement, placementOpposite]; break; case BEHAVIORS.CLOCKWISE: flipOrder = clockwise(placement); break; case BEHAVIORS.COUNTERCLOCKWISE: flipOrder = clockwise(placement, true); break; default: flipOrder = options.behavior; } flipOrder.forEach(function (step, index) { if (placement !== step || flipOrder.length === index + 1) { return data; } placement = data.placement.split('-')[0]; placementOpposite = getOppositePlacement(placement); var popperOffsets = data.offsets.popper; var refOffsets = data.offsets.reference; // using floor because the reference offsets may contain decimals that we are not going to consider here var floor = Math.floor; var overlapsRef = placement === 'left' && floor(popperOffsets.right) > floor(refOffsets.left) || placement === 'right' && floor(popperOffsets.left) < floor(refOffsets.right) || placement === 'top' && floor(popperOffsets.bottom) > floor(refOffsets.top) || placement === 'bottom' && floor(popperOffsets.top) < floor(refOffsets.bottom); var overflowsLeft = floor(popperOffsets.left) < floor(boundaries.left); var overflowsRight = floor(popperOffsets.right) >
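/*
 * Worked example for the `clockwise` helper above (illustrative only): the
 * list starts right after the given placement and wraps around, with the
 * `auto` entries already excluded via `validPlacements`:
 *
 *   clockwise('top');
 *   // => ['top-end', 'right-start', 'right', 'right-end', 'bottom-end',
 *   //     'bottom', 'bottom-start', 'left-end', 'left', 'left-start', 'top-start']
 *   clockwise('top', true); // same list, reversed
 */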
floor(boundaries.right); var overflowsTop = floor(popperOffsets.top) < floor(boundaries.top); var overflowsBottom = floor(popperOffsets.bottom) > floor(boundaries.bottom); var overflowsBoundaries = placement === 'left' && overflowsLeft || placement === 'right' && overflowsRight || placement === 'top' && overflowsTop || placement === 'bottom' && overflowsBottom; // flip the variation if required var isVertical = ['top', 'bottom'].indexOf(placement) !== -1; // flips variation if reference element overflows boundaries var flippedVariationByRef = !!options.flipVariations && (isVertical && variation === 'start' && overflowsLeft || isVertical && variation === 'end' && overflowsRight || !isVertical && variation === 'start' && overflowsTop || !isVertical && variation === 'end' && overflowsBottom); // flips variation if popper content overflows boundaries var flippedVariationByContent = !!options.flipVariationsByContent && (isVertical && variation === 'start' && overflowsRight || isVertical && variation === 'end' && overflowsLeft || !isVertical && variation === 'start' && overflowsBottom || !isVertical && variation === 'end' && overflowsTop); var flippedVariation = flippedVariationByRef || flippedVariationByContent; if (overlapsRef || overflowsBoundaries || flippedVariation) { // this boolean to detect any flip loop data.flipped = true; if (overlapsRef || overflowsBoundaries) { placement = flipOrder[index + 1]; } if (flippedVariation) { variation = getOppositeVariation(variation); } data.placement = placement + (variation ? '-' + variation : ''); // this object contains `position`, we want to preserve it along with // any additional property we may add in the future data.offsets.popper = _extends({}, data.offsets.popper, getPopperOffsets(data.instance.popper, data.offsets.reference, data.placement)); data = runModifiers(data.instance.modifiers, data, 'flip'); } }); return data; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by update method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function keepTogether(data) { var _data$offsets = data.offsets, popper = _data$offsets.popper, reference = _data$offsets.reference; var placement = data.placement.split('-')[0]; var floor = Math.floor; var isVertical = ['top', 'bottom'].indexOf(placement) !== -1; var side = isVertical ? 'right' : 'bottom'; var opSide = isVertical ? 'left' : 'top'; var measurement = isVertical ? 
'width' : 'height'; if (popper[side] < floor(reference[opSide])) { data.offsets.popper[opSide] = floor(reference[opSide]) - popper[measurement]; } if (popper[opSide] > floor(reference[side])) { data.offsets.popper[opSide] = floor(reference[side]); } return data; } /** * Converts a string containing value + unit into a px value number * @function * @memberof {modifiers~offset} * @private * @argument {String} str - Value + unit string * @argument {String} measurement - `height` or `width` * @argument {Object} popperOffsets * @argument {Object} referenceOffsets * @returns {Number|String} * Value in pixels, or original string if no values were extracted */ function toValue(str, measurement, popperOffsets, referenceOffsets) { // separate value from unit var split = str.match(/((?:\-|\+)?\d*\.?\d*)(.*)/); var value = +split[1]; var unit = split[2]; // If it's not a number it's an operator, I guess if (!value) { return str; } if (unit.indexOf('%') === 0) { var element = void 0; switch (unit) { case '%p': element = popperOffsets; break; case '%': case '%r': default: element = referenceOffsets; } var rect = getClientRect(element); return rect[measurement] / 100 * value; } else if (unit === 'vh' || unit === 'vw') { // if it is a vh or vw unit, we calculate the size based on the viewport var size = void 0; if (unit === 'vh') { size = Math.max(document.documentElement.clientHeight, window.innerHeight || 0); } else { size = Math.max(document.documentElement.clientWidth, window.innerWidth || 0); } return size / 100 * value; } else { // if it is an explicit pixel unit, we get rid of the unit and keep the value // if it is an implicit unit, it's px, and we return just the value return value; } } /** * Parse an `offset` string to extrapolate `x` and `y` numeric offsets. * @function * @memberof {modifiers~offset} * @private * @argument {String} offset * @argument {Object} popperOffsets * @argument {Object} referenceOffsets * @argument {String} basePlacement * @returns {Array} a two-cell array with x and y offsets in numbers */ function parseOffset(offset, popperOffsets, referenceOffsets, basePlacement) { var offsets = [0, 0]; // Use height if placement is left or right and index is 0, otherwise use width // in this way the first offset will use an axis and the second one // will use the other one var useHeight = ['right', 'left'].indexOf(basePlacement) !== -1; // Split the offset string to obtain a list of values and operands // The regex addresses values with the plus or minus sign in front (+10, -20, etc) var fragments = offset.split(/(\+|\-)/).map(function (frag) { return frag.trim(); }); // Detect if the offset string contains a pair of values or a single one // they could be separated by comma or space var divider = fragments.indexOf(find(fragments, function (frag) { return frag.search(/,|\s/) !== -1; })); if (fragments[divider] && fragments[divider].indexOf(',') === -1) { console.warn('Offsets separated by white space(s) are deprecated, use a comma (,) instead.'); } // If a divider is found, we split the list of values and operands into // X and Y offsets. var splitRegex = /\s*,\s*|\s+/; var ops = divider !== -1 ? [fragments.slice(0, divider).concat([fragments[divider].split(splitRegex)[0]]), [fragments[divider].split(splitRegex)[1]].concat(fragments.slice(divider + 1))] : [fragments]; // Convert the values with units to absolute pixels to allow our computations ops = ops.map(function (op, index) { // Most of the units rely on the orientation of the popper var measurement = (index === 1 ?
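/*
 * Worked example for `toValue`/`parseOffset` (illustrative only, assuming a
 * reference rect 200px wide and placement 'top', so index 0 maps to `width`;
 * `popperRect` and `refRect` stand for the offset objects computed earlier):
 *
 *   parseOffset('10% + 5', popperRect, refRect, 'top');
 *   // '10%' of 200 => 20, plus 5 => [25, 0]
 *   parseOffset('10, -5', popperRect, refRect, 'top');
 *   // => [10, -5]  (the comma splits the X and Y offsets)
 */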
!useHeight : useHeight) ? 'height' : 'width'; var mergeWithPrevious = false; return op // This aggregates any `+` or `-` signs that aren't considered operators // e.g.: 10 + +5 => [10, +, +5] .reduce(function (a, b) { if (a[a.length - 1] === '' && ['+', '-'].indexOf(b) !== -1) { a[a.length - 1] = b; mergeWithPrevious = true; return a; } else if (mergeWithPrevious) { a[a.length - 1] += b; mergeWithPrevious = false; return a; } else { return a.concat(b); } }, []) // Here we convert the string values into number values (in px) .map(function (str) { return toValue(str, measurement, popperOffsets, referenceOffsets); }); }); // Loop through the offsets arrays and execute the operations ops.forEach(function (op, index) { op.forEach(function (frag, index2) { if (isNumeric(frag)) { offsets[index] += frag * (op[index2 - 1] === '-' ? -1 : 1); } }); }); return offsets; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by update method * @argument {Object} options - Modifiers configuration and options * @argument {Number|String} options.offset=0 * The offset value as described in the modifier description * @returns {Object} The data object, properly modified */ function offset(data, _ref) { var offset = _ref.offset; var placement = data.placement, _data$offsets = data.offsets, popper = _data$offsets.popper, reference = _data$offsets.reference; var basePlacement = placement.split('-')[0]; var offsets = void 0; if (isNumeric(+offset)) { offsets = [+offset, 0]; } else { offsets = parseOffset(offset, popper, reference, basePlacement); } if (basePlacement === 'left') { popper.top += offsets[0]; popper.left -= offsets[1]; } else if (basePlacement === 'right') { popper.top += offsets[0]; popper.left += offsets[1]; } else if (basePlacement === 'top') { popper.left += offsets[0]; popper.top -= offsets[1]; } else if (basePlacement === 'bottom') { popper.left += offsets[0]; popper.top += offsets[1]; } data.popper = popper; return data; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by `update` method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function preventOverflow(data, options) { var boundariesElement = options.boundariesElement || getOffsetParent(data.instance.popper); // If offsetParent is the reference element, we really want to // go one step up and use the next offsetParent as reference to // avoid to make this modifier completely useless and look like broken if (data.instance.reference === boundariesElement) { boundariesElement = getOffsetParent(boundariesElement); } // NOTE: DOM access here // resets the popper's position so that the document size can be calculated excluding // the size of the popper element itself var transformProp = getSupportedPropertyName('transform'); var popperStyles = data.instance.popper.style; // assignment to help minification var top = popperStyles.top, left = popperStyles.left, transform = popperStyles[transformProp]; popperStyles.top = ''; popperStyles.left = ''; popperStyles[transformProp] = ''; var boundaries = getBoundaries(data.instance.popper, data.instance.reference, options.padding, boundariesElement, data.positionFixed); // NOTE: DOM access here // restores the original style properties after the offsets have been computed popperStyles.top = top; popperStyles.left = left; popperStyles[transformProp] = transform; options.boundaries = boundaries; var order = options.priority; var popper =
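/*
 * Usage sketch for the `offset` modifier above (illustrative only): the first
 * value shifts the popper along its reference edge, the second moves it away
 * from (or into) the reference. `referenceNode` and `popperNode` are assumed
 * existing DOM elements:
 *
 *   new Popper(referenceNode, popperNode, {
 *     placement: 'top',
 *     modifiers: { offset: { offset: '10, 20' } }
 *   });
 *   // for a 'top' placement: popper.left += 10, popper.top -= 20
 */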
data.offsets.popper; var check = { primary: function primary(placement) { var value = popper[placement]; if (popper[placement] < boundaries[placement] && !options.escapeWithReference) { value = Math.max(popper[placement], boundaries[placement]); } return defineProperty({}, placement, value); }, secondary: function secondary(placement) { var mainSide = placement === 'right' ? 'left' : 'top'; var value = popper[mainSide]; if (popper[placement] > boundaries[placement] && !options.escapeWithReference) { value = Math.min(popper[mainSide], boundaries[placement] - (placement === 'right' ? popper.width : popper.height)); } return defineProperty({}, mainSide, value); } }; order.forEach(function (placement) { var side = ['left', 'top'].indexOf(placement) !== -1 ? 'primary' : 'secondary'; popper = _extends({}, popper, check[side](placement)); }); data.offsets.popper = popper; return data; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by `update` method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function shift(data) { var placement = data.placement; var basePlacement = placement.split('-')[0]; var shiftvariation = placement.split('-')[1]; // if shift shiftvariation is specified, run the modifier if (shiftvariation) { var _data$offsets = data.offsets, reference = _data$offsets.reference, popper = _data$offsets.popper; var isVertical = ['bottom', 'top'].indexOf(basePlacement) !== -1; var side = isVertical ? 'left' : 'top'; var measurement = isVertical ? 'width' : 'height'; var shiftOffsets = { start: defineProperty({}, side, reference[side]), end: defineProperty({}, side, reference[side] + reference[measurement] - popper[measurement]) }; data.offsets.popper = _extends({}, popper, shiftOffsets[shiftvariation]); } return data; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by update method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function hide(data) { if (!isModifierRequired(data.instance.modifiers, 'hide', 'preventOverflow')) { return data; } var refRect = data.offsets.reference; var bound = find(data.instance.modifiers, function (modifier) { return modifier.name === 'preventOverflow'; }).boundaries; if (refRect.bottom < bound.top || refRect.left > bound.right || refRect.top > bound.bottom || refRect.right < bound.left) { // Avoid unnecessary DOM access if visibility hasn't changed if (data.hide === true) { return data; } data.hide = true; data.attributes['x-out-of-boundaries'] = ''; } else { // Avoid unnecessary DOM access if visibility hasn't changed if (data.hide === false) { return data; } data.hide = false; data.attributes['x-out-of-boundaries'] = false; } return data; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by `update` method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function inner(data) { var placement = data.placement; var basePlacement = placement.split('-')[0]; var _data$offsets = data.offsets, popper = _data$offsets.popper, reference = _data$offsets.reference; var isHoriz = ['left', 'right'].indexOf(basePlacement) !== -1; var subtractLength = ['top', 'left'].indexOf(basePlacement) === -1; popper[isHoriz ? 'left' : 'top'] = reference[basePlacement] - (subtractLength ? popper[isHoriz ? 
'width' : 'height'] : 0); data.placement = getOppositePlacement(placement); data.offsets.popper = getClientRect(popper); return data; } /** * Modifier function; each modifier can have a function of this type assigned * to its `fn` property.
* These functions will be called on each update; this means that you must * make sure they are performant enough to avoid performance bottlenecks. * * @function ModifierFn * @argument {dataObject} data - The data object generated by `update` method * @argument {Object} options - Modifiers configuration and options * @returns {dataObject} The data object, properly modified */ /** * Modifiers are plugins used to alter the behavior of your poppers.
* Popper.js uses a set of 9 modifiers to provide all the basic functionalities * needed by the library. * * Usually you don't want to override the `order`, `fn` and `onLoad` props. * All the other properties are configurations that could be tweaked. * @namespace modifiers */ var modifiers = { /** * Modifier used to shift the popper on the start or end of its reference * element.
* It will read the variation of the `placement` property.
* It can be either `-end` or `-start`. * @memberof modifiers * @inner */ shift: { /** @prop {number} order=100 - Index used to define the order of execution */ order: 100, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: shift }, /** * The `offset` modifier can shift your popper on both its axes. * * It accepts the following units: * - `px` or unit-less, interpreted as pixels * - `%` or `%r`, percentage relative to the length of the reference element * - `%p`, percentage relative to the length of the popper element * - `vw`, CSS viewport width unit * - `vh`, CSS viewport height unit * * Here "length" means the main axis relative to the placement of the popper.
* This means that if the placement is `top` or `bottom`, the length will be the * `width`. In case of `left` or `right`, it will be the `height`. * * You can provide a single value (as `Number` or `String`), or a pair of values * as `String` divided by a comma or one (or more) white spaces.
* The latter form is deprecated because it leads to confusion and will be * removed in v2.
* Additionally, it accepts additions and subtractions between different units. * Note that multiplications and divisions aren't supported. * * Valid examples are: * ``` * 10 * '10%' * '10, 10' * '10%, 10' * '10 + 10%' * '10 - 5vh + 3%' * '-10px + 5vh, 5px - 6%' * ``` * > **NB**: If you desire to apply offsets to your poppers in a way that may make them overlap * > with their reference element, unfortunately, you will have to disable the `flip` modifier. * > You can read more on this at this [issue](https://github.com/FezVrasta/popper.js/issues/373). * * @memberof modifiers * @inner */ offset: { /** @prop {number} order=200 - Index used to define the order of execution */ order: 200, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: offset, /** @prop {Number|String} offset=0 * The offset value as described in the modifier description */ offset: 0 }, /** * Modifier used to prevent the popper from being positioned outside the boundary. * * A scenario exists where the reference itself is not within the boundaries.
* We can say it has "escaped the boundaries" — or just "escaped".
* In this case we need to decide whether the popper should either: * * - detach from the reference and remain "trapped" in the boundaries, or * - ignore the boundary and "escape with its reference" * * When `escapeWithReference` is set to `true` and the reference is completely * outside its boundaries, the popper will overflow (or completely leave) * the boundaries in order to remain attached to the edge of the reference. * * @memberof modifiers * @inner */ preventOverflow: { /** @prop {number} order=300 - Index used to define the order of execution */ order: 300, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: preventOverflow, /** * @prop {Array} [priority=['left','right','top','bottom']] * Popper will try to prevent overflow following these priorities by default, * then, it could overflow on the left and on top of the `boundariesElement` */ priority: ['left', 'right', 'top', 'bottom'], /** * @prop {number} padding=5 * Amount of pixels used to define a minimum distance between the boundaries * and the popper. This makes sure the popper always has a little padding * between the edges of its container */ padding: 5, /** * @prop {String|HTMLElement} boundariesElement='scrollParent' * Boundaries used by the modifier. Can be `scrollParent`, `window`, * `viewport` or any DOM element. */ boundariesElement: 'scrollParent' }, /** * Modifier used to make sure the reference and its popper stay near each other * without leaving any gap between the two. Especially useful when the arrow is * enabled and you want to ensure that it points to its reference element. * It cares only about the first axis. You can still have poppers with margin * between the popper and its reference element. * @memberof modifiers * @inner */ keepTogether: { /** @prop {number} order=400 - Index used to define the order of execution */ order: 400, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: keepTogether }, /** * This modifier is used to move the `arrowElement` of the popper to make * sure it is positioned between the reference element and its popper element. * It will read the outer size of the `arrowElement` node to detect how many * pixels of overlap are needed. * * It has no effect if no `arrowElement` is provided. * @memberof modifiers * @inner */ arrow: { /** @prop {number} order=500 - Index used to define the order of execution */ order: 500, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: arrow, /** @prop {String|HTMLElement} element='[x-arrow]' - Selector or node used as arrow */ element: '[x-arrow]' }, /** * Modifier used to flip the popper's placement when it starts to overlap its * reference element. * * Requires the `preventOverflow` modifier before it in order to work. * * **NOTE:** this modifier will interrupt the current update cycle and will * restart it if it detects the need to flip the placement. * @memberof modifiers * @inner */ flip: { /** @prop {number} order=600 - Index used to define the order of execution */ order: 600, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: flip, /** * @prop {String|Array} behavior='flip' * The behavior used to change the popper's placement. 
It can be one of * `flip`, `clockwise`, `counterclockwise` or an array with a list of valid * placements (with optional variations) */ behavior: 'flip', /** * @prop {number} padding=5 * The popper will flip if it hits the edges of the `boundariesElement` */ padding: 5, /** * @prop {String|HTMLElement} boundariesElement='viewport' * The element which will define the boundaries of the popper position. * The popper will never be placed outside of the defined boundaries * (except if `keepTogether` is enabled) */ boundariesElement: 'viewport', /** * @prop {Boolean} flipVariations=false * The popper will switch placement variation between `-start` and `-end` when * the reference element overlaps its boundaries. * * The original placement should have a set variation. */ flipVariations: false, /** * @prop {Boolean} flipVariationsByContent=false * The popper will switch placement variation between `-start` and `-end` when * the popper element overlaps its reference boundaries. * * The original placement should have a set variation. */ flipVariationsByContent: false }, /** * Modifier used to make the popper flow toward the inside of the reference element. * By default, when this modifier is disabled, the popper will be placed outside * the reference element. * @memberof modifiers * @inner */ inner: { /** @prop {number} order=700 - Index used to define the order of execution */ order: 700, /** @prop {Boolean} enabled=false - Whether the modifier is enabled or not */ enabled: false, /** @prop {ModifierFn} */ fn: inner }, /** * Modifier used to hide the popper when its reference element is outside of the * popper boundaries. It will set an `x-out-of-boundaries` attribute which can * be used, with a CSS selector, to hide the popper when its reference is * out of boundaries. * * Requires the `preventOverflow` modifier before it in order to work. * @memberof modifiers * @inner */ hide: { /** @prop {number} order=800 - Index used to define the order of execution */ order: 800, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: hide }, /** * Computes the style that will be applied to the popper element so that it gets * properly positioned. * * Note that this modifier will not touch the DOM; it just prepares the styles * so that the `applyStyle` modifier can apply it. This separation is useful * in case you need to replace `applyStyle` with a custom implementation. * * This modifier has `850` as `order` value to maintain backward compatibility * with previous versions of Popper.js. Expect the modifiers ordering method * to change in future major versions of the library. * * @memberof modifiers * @inner */ computeStyle: { /** @prop {number} order=850 - Index used to define the order of execution */ order: 850, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: computeStyle, /** * @prop {Boolean} gpuAcceleration=true * If true, it uses the CSS 3D transformation to position the popper. * Otherwise, it will use the `top` and `left` properties */ gpuAcceleration: true, /** * @prop {string} [x='bottom'] * Where to anchor the X axis (`bottom` or `top`). AKA X offset origin. * Change this if your popper should grow in a direction different from `bottom` */ x: 'bottom', /** * @prop {string} [y='left'] * Where to anchor the Y axis (`left` or `right`). AKA Y offset origin. 
* Change this if your popper should grow in a direction different from `right` */ y: 'right' }, /** * Applies the computed styles to the popper element. * * All the DOM manipulations are limited to this modifier. This is useful in case * you want to integrate Popper.js inside a framework or view library and you * want to delegate all the DOM manipulations to it. * * Note that if you disable this modifier, you must make sure the popper element * has its position set to `absolute` before Popper.js can do its work! * * Just disable this modifier and define your own to achieve the desired effect. * * @memberof modifiers * @inner */ applyStyle: { /** @prop {number} order=900 - Index used to define the order of execution */ order: 900, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: applyStyle, /** @prop {Function} */ onLoad: applyStyleOnLoad, /** * @deprecated since version 1.10.0, the property moved to `computeStyle` modifier * @prop {Boolean} gpuAcceleration=true * If true, it uses the CSS 3D transformation to position the popper. * Otherwise, it will use the `top` and `left` properties */ gpuAcceleration: undefined } }; /** * The `dataObject` is an object containing all the information used by Popper.js. * This object is passed to modifiers and to the `onCreate` and `onUpdate` callbacks. * @name dataObject * @property {Object} data.instance The Popper.js instance * @property {String} data.placement Placement applied to popper * @property {String} data.originalPlacement Placement originally defined on init * @property {Boolean} data.flipped True if popper has been flipped by flip modifier * @property {Boolean} data.hide True if the reference element is out of boundaries, useful to know when to hide the popper * @property {HTMLElement} data.arrowElement Node used as arrow by arrow modifier * @property {Object} data.styles Any CSS property defined here will be applied to the popper. It expects the JavaScript nomenclature (eg. `marginBottom`) * @property {Object} data.arrowStyles Any CSS property defined here will be applied to the popper arrow. It expects the JavaScript nomenclature (eg. `marginBottom`) * @property {Object} data.boundaries Offsets of the popper boundaries * @property {Object} data.offsets The measurements of popper, reference and arrow elements * @property {Object} data.offsets.popper `top`, `left`, `width`, `height` values * @property {Object} data.offsets.reference `top`, `left`, `width`, `height` values * @property {Object} data.offsets.arrow `top` and `left` offsets, only one of them will be different from 0 */ /** * Default options provided to Popper.js constructor.
* These can be overridden using the `options` argument of Popper.js.
* To override an option, simply pass an object with the same * structure as the `options` object, as the 3rd argument. For example: * ``` * new Popper(ref, pop, { * modifiers: { * preventOverflow: { enabled: false } * } * }) * ``` * @type {Object} * @static * @memberof Popper */ var Defaults = { /** * Popper's placement. * @prop {Popper.placements} placement='bottom' */ placement: 'bottom', /** * Set this to true if you want popper to position itself in 'fixed' mode * @prop {Boolean} positionFixed=false */ positionFixed: false, /** * Whether events (resize, scroll) are initially enabled. * @prop {Boolean} eventsEnabled=true */ eventsEnabled: true, /** * Set to true if you want to automatically remove the popper when * you call the `destroy` method. * @prop {Boolean} removeOnDestroy=false */ removeOnDestroy: false, /** * Callback called when the popper is created.
* By default, it is set to no-op.
* Access Popper.js instance with `data.instance`. * @prop {onCreate} */ onCreate: function onCreate() {}, /** * Callback called when the popper is updated. This callback is not called * on the initialization/creation of the popper, but only on subsequent * updates.
* By default, it is set to no-op.
* Access Popper.js instance with `data.instance`. * @prop {onUpdate} */ onUpdate: function onUpdate() {}, /** * List of modifiers used to modify the offsets before they are applied to the popper. * They provide most of the functionalities of Popper.js. * @prop {modifiers} */ modifiers: modifiers }; /** * @callback onCreate * @param {dataObject} data */ /** * @callback onUpdate * @param {dataObject} data */ // Utils // Methods var Popper = function () { /** * Creates a new Popper.js instance. * @class Popper * @param {Element|referenceObject} reference - The reference element used to position the popper * @param {Element} popper - The HTML / XML element used as the popper * @param {Object} options - Your custom options to override the ones defined in [Defaults](#defaults) * @return {Object} instance - The generated Popper.js instance */ function Popper(reference, popper) { var _this = this; var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {}; classCallCheck(this, Popper); this.scheduleUpdate = function () { return requestAnimationFrame(_this.update); }; // make update() debounced, so that it only runs at most once-per-tick this.update = debounce(this.update.bind(this)); // with {} we create a new object with the options inside it this.options = _extends({}, Popper.Defaults, options); // init state this.state = { isDestroyed: false, isCreated: false, scrollParents: [] }; // get reference and popper elements (allow jQuery wrappers) this.reference = reference && reference.jquery ? reference[0] : reference; this.popper = popper && popper.jquery ? popper[0] : popper; // Deep merge modifiers options this.options.modifiers = {}; Object.keys(_extends({}, Popper.Defaults.modifiers, options.modifiers)).forEach(function (name) { _this.options.modifiers[name] = _extends({}, Popper.Defaults.modifiers[name] || {}, options.modifiers ? options.modifiers[name] : {}); }); // Refactoring modifiers' list (Object => Array) this.modifiers = Object.keys(this.options.modifiers).map(function (name) { return _extends({ name: name }, _this.options.modifiers[name]); }) // sort the modifiers by order .sort(function (a, b) { return a.order - b.order; }); // modifiers have the ability to execute arbitrary code when Popper.js gets initialized // such code is executed in the same order as its modifier // they could add new properties to their options configuration // BE AWARE: don't add options to `options.modifiers.name` but to `modifierOptions`!
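// For illustration only (a sketch, not part of the library): a custom modifier
// plugged in through the constructor options could look like
//
//   new Popper(referenceEl, popperEl, {        // `referenceEl`/`popperEl` are hypothetical nodes
//     modifiers: {
//       logPlacement: {                        // hypothetical modifier name
//         order: 875,                          // after computeStyle (850), before applyStyle (900)
//         enabled: true,
//         fn: function (data) { console.log(data.placement); return data; },
//         onLoad: function (reference, popper, options, modifierOptions, state) {}
//       }
//     }
//   });
//
// The loop below is what invokes each enabled modifier's `onLoad` once, in
// ascending `order`.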
this.modifiers.forEach(function (modifierOptions) { if (modifierOptions.enabled && isFunction(modifierOptions.onLoad)) { modifierOptions.onLoad(_this.reference, _this.popper, _this.options, modifierOptions, _this.state); } }); // fire the first update to position the popper in the right place this.update(); var eventsEnabled = this.options.eventsEnabled; if (eventsEnabled) { // set up event listeners; they will take care of updating the position in specific situations this.enableEventListeners(); } this.state.eventsEnabled = eventsEnabled; } // We can't use class properties because they don't get listed in the // class prototype and break stuff like Sinon stubs createClass(Popper, [{ key: 'update', value: function update$$1() { return update.call(this); } }, { key: 'destroy', value: function destroy$$1() { return destroy.call(this); } }, { key: 'enableEventListeners', value: function enableEventListeners$$1() { return enableEventListeners.call(this); } }, { key: 'disableEventListeners', value: function disableEventListeners$$1() { return disableEventListeners.call(this); } /** * Schedules an update. It will run on the next UI update available. * @method scheduleUpdate * @memberof Popper */ /** * Collection of utilities useful when writing custom modifiers. * Starting from version 1.7, this method is available only if you * include `popper-utils.js` before `popper.js`. * * **DEPRECATION**: This way to access PopperUtils is deprecated * and will be removed in v2! Use the PopperUtils module directly instead. * Due to the high instability of the methods contained in Utils, we can't * guarantee them to follow semver. Use them at your own risk! * @static * @private * @type {Object} * @deprecated since version 1.8 * @member Utils * @memberof Popper */ }]); return Popper; }(); /** * The `referenceObject` is an object that provides an interface compatible with Popper.js * and lets you use it as a replacement for a real DOM node.
* You can use this method to position a popper relative to a set of coordinates * in case you don't have a DOM node to use as a reference. * * ``` * new Popper(referenceObject, popperNode); * ``` * * NB: This feature isn't supported in Internet Explorer 10. * @name referenceObject * @property {Function} data.getBoundingClientRect * A function that returns a set of coordinates compatible with the native `getBoundingClientRect` method. * @property {number} data.clientWidth * An ES6 getter that will return the width of the virtual reference element. * @property {number} data.clientHeight * An ES6 getter that will return the height of the virtual reference element. */ Popper.Utils = (typeof window !== 'undefined' ? window : global).PopperUtils; Popper.placements = placements; Popper.Defaults = Defaults; /* harmony default export */ __webpack_exports__["a"] = (Popper); /* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(41))) /***/ }), /* 39 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.uncontrolledPropTypes = uncontrolledPropTypes; exports.isProp = isProp; exports.defaultKey = defaultKey; exports.canAcceptRef = canAcceptRef; var _invariant = _interopRequireDefault(__webpack_require__(24)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } var noop = function noop() {}; function readOnlyPropType(handler, name) { return function (props, propName) { if (props[propName] !== undefined) { if (!props[handler]) { return new Error("You have provided a `" + propName + "` prop to `" + name + "` " + ("without an `" + handler + "` handler prop. This will render a read-only field. ") + ("If the field should be mutable use `" + defaultKey(propName) + "`. ") + ("Otherwise, set `" + handler + "`.")); } } }; } function uncontrolledPropTypes(controlledValues, displayName) { var propTypes = {}; Object.keys(controlledValues).forEach(function (prop) { // add default propTypes for folks that use runtime checks propTypes[defaultKey(prop)] = noop; if (false) { var handler; } }); return propTypes; } function isProp(props, prop) { return props[prop] !== undefined; } function defaultKey(key) { return 'default' + key.charAt(0).toUpperCase() + key.substr(1); } /** * Copyright (c) 2013-present, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. */ function canAcceptRef(component) { return !!component && (typeof component !== 'function' || component.prototype && component.prototype.isReactComponent); } /***/ }), /* 40 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = camelizeStyleName; var _camelize = _interopRequireDefault(__webpack_require__(33)); /** * Copyright 2014-2015, Facebook, Inc. * All rights reserved. 
* https://github.com/facebook/react/blob/2aeb8a2a6beb00617a4217f7f8284924fa2ad819/src/vendor/core/camelizeStyleName.js */ var msPattern = /^-ms-/; function camelizeStyleName(string) { return (0, _camelize.default)(string.replace(msPattern, 'ms-')); } module.exports = exports["default"]; /***/ }), /* 41 */ /***/ (function(module, exports) { var g; // This works in non-strict mode g = function () { return this; }(); try { // This works if eval is allowed (see CSP) g = g || new Function("return this")(); } catch (e) { // This works if the window reference is available if (typeof window === "object") g = window; } // g can still be undefined, but nothing to do about it... // We return undefined, instead of nothing here, so it's // easier to handle this case. if(!global) { ...} module.exports = g; /***/ }), /* 42 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = void 0; var _contains = _interopRequireDefault(__webpack_require__(22)); var _listen = _interopRequireDefault(__webpack_require__(30)); var _propTypes = _interopRequireDefault(__webpack_require__(0)); var _react = _interopRequireDefault(__webpack_require__(1)); var _reactDom = _interopRequireDefault(__webpack_require__(6)); var _ownerDocument = _interopRequireDefault(__webpack_require__(45)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } function _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return self; } var escapeKeyCode = 27; var noop = function noop() {}; function isLeftClickEvent(event) { return event.button === 0; } function isModifiedEvent(event) { return !!(event.metaKey || event.altKey || event.ctrlKey || event.shiftKey); } /** * The `<RootCloseWrapper/>` component registers your callback on the document * when rendered. Powers the `<Overlay/>` component. This is used to achieve modal * style behavior where your callback is triggered when the user tries to * interact with the rest of the document or hits the `esc` key. */ var RootCloseWrapper = /*#__PURE__*/ function (_React$Component) { _inheritsLoose(RootCloseWrapper, _React$Component); function RootCloseWrapper(props, context) { var _this; _this = _React$Component.call(this, props, context) || this; _this.addEventListeners = function () { var event = _this.props.event; var doc = (0, _ownerDocument.default)(_assertThisInitialized(_assertThisInitialized(_this))); // Use capture for this listener so it fires before React's listener, to // avoid false positives in the contains() check below if the target DOM // element is removed in the React mouse callback. 
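// As a plain-DOM sketch of that capture trick (handler names are hypothetical):
//   document.addEventListener('click', captureHandler, true);  // capture phase, fires first
//   document.addEventListener('click', bubbleHandler, false);  // bubbling phase, fires later
// The `true` passed as the last argument of `listen` below is that same
// capture flag.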
_this.removeMouseCaptureListener = (0, _listen.default)(doc, event, _this.handleMouseCapture, true); _this.removeMouseListener = (0, _listen.default)(doc, event, _this.handleMouse); _this.removeKeyupListener = (0, _listen.default)(doc, 'keyup', _this.handleKeyUp); if ('ontouchstart' in doc.documentElement) { _this.mobileSafariHackListeners = [].slice.call(document.body.children).map(function (el) { return (0, _listen.default)(el, 'mousemove', noop); }); } }; _this.removeEventListeners = function () { if (_this.removeMouseCaptureListener) _this.removeMouseCaptureListener(); if (_this.removeMouseListener) _this.removeMouseListener(); if (_this.removeKeyupListener) _this.removeKeyupListener(); if (_this.mobileSafariHackListeners) _this.mobileSafariHackListeners.forEach(function (remove) { return remove(); }); }; _this.handleMouseCapture = function (e) { _this.preventMouseRootClose = isModifiedEvent(e) || !isLeftClickEvent(e) || (0, _contains.default)(_reactDom.default.findDOMNode(_assertThisInitialized(_assertThisInitialized(_this))), e.target); }; _this.handleMouse = function (e) { if (!_this.preventMouseRootClose && _this.props.onRootClose) { _this.props.onRootClose(e); } }; _this.handleKeyUp = function (e) { if (e.keyCode === escapeKeyCode && _this.props.onRootClose) { _this.props.onRootClose(e); } }; _this.preventMouseRootClose = false; return _this; } var _proto = RootCloseWrapper.prototype; _proto.componentDidMount = function componentDidMount() { if (!this.props.disabled) { this.addEventListeners(); } }; _proto.componentDidUpdate = function componentDidUpdate(prevProps) { if (!this.props.disabled && prevProps.disabled) { this.addEventListeners(); } else if (this.props.disabled && !prevProps.disabled) { this.removeEventListeners(); } }; _proto.componentWillUnmount = function componentWillUnmount() { if (!this.props.disabled) { this.removeEventListeners(); } }; _proto.render = function render() { return this.props.children; }; return RootCloseWrapper; }(_react.default.Component); RootCloseWrapper.displayName = 'RootCloseWrapper'; RootCloseWrapper.propTypes = { /** * Callback fired after click or mousedown. Also triggers when user hits `esc`. */ onRootClose: _propTypes.default.func, /** * Children to render. */ children: _propTypes.default.element, /** * Disable the RootCloseWrapper, preventing it from triggering `onRootClose`. */ disabled: _propTypes.default.bool, /** * Choose which document mouse event to bind to. 
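 *
 * A usage sketch (illustrative only; `closeMenu` and `MyMenu` are hypothetical):
 *
 * ```js
 * <RootCloseWrapper onRootClose={closeMenu} event="mousedown">
 *   <MyMenu />
 * </RootCloseWrapper>
 * ```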
*/ event: _propTypes.default.oneOf(['click', 'mousedown']) }; RootCloseWrapper.defaultProps = { event: 'click' }; var _default = RootCloseWrapper; exports.default = _default; module.exports = exports.default; /***/ }), /* 43 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = void 0; var _inDOM = _interopRequireDefault(__webpack_require__(10)); var on = function on() {}; if (_inDOM.default) { on = function () { if (document.addEventListener) return function (node, eventName, handler, capture) { return node.addEventListener(eventName, handler, capture || false); };else if (document.attachEvent) return function (node, eventName, handler) { return node.attachEvent('on' + eventName, function (e) { e = e || window.event; e.target = e.target || e.srcElement; e.currentTarget = node; handler.call(node, e); }); }; }(); } var _default = on; exports.default = _default; module.exports = exports["default"]; /***/ }), /* 44 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = void 0; var _inDOM = _interopRequireDefault(__webpack_require__(10)); var off = function off() {}; if (_inDOM.default) { off = function () { if (document.addEventListener) return function (node, eventName, handler, capture) { return node.removeEventListener(eventName, handler, capture || false); };else if (document.attachEvent) return function (node, eventName, handler) { return node.detachEvent('on' + eventName, handler); }; }(); } var _default = off; exports.default = _default; module.exports = exports["default"]; /***/ }), /* 45 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = _default; var _reactDom = _interopRequireDefault(__webpack_require__(6)); var _ownerDocument = _interopRequireDefault(__webpack_require__(16)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _default(componentOrElement) { return (0, _ownerDocument.default)(_reactDom.default.findDOMNode(componentOrElement)); } module.exports = exports.default; /***/ }), /* 46 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = forwardRef; var _react = _interopRequireDefault(__webpack_require__(1)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function forwardRef(renderFn, _ref) { var displayName = _ref.displayName, propTypes = _ref.propTypes, defaultProps = _ref.defaultProps, _ref$allowFallback = _ref.allowFallback, allowFallback = _ref$allowFallback === void 0 ? 
false : _ref$allowFallback; var render = function render(props, ref) { return renderFn(props, ref); }; Object.assign(render, { displayName: displayName }); if (_react.default.forwardRef || !allowFallback) return Object.assign(_react.default.forwardRef(render), { propTypes: propTypes, defaultProps: defaultProps }); return Object.assign(function (props) { return render(props, null); }, { displayName: displayName, propTypes: propTypes, defaultProps: defaultProps }); } /***/ }), /* 47 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = hasClass; function hasClass(element, className) { if (element.classList) return !!className && element.classList.contains(className);else return (" " + (element.className.baseVal || element.className) + " ").indexOf(" " + className + " ") !== -1; } module.exports = exports["default"]; /***/ }), /* 48 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = void 0; var _propTypes = _interopRequireDefault(__webpack_require__(0)); var _componentOrElement = _interopRequireDefault(__webpack_require__(20)); var _react = _interopRequireDefault(__webpack_require__(1)); var _reactDom = _interopRequireDefault(__webpack_require__(6)); var _WaitForContainer = _interopRequireDefault(__webpack_require__(49)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } /** * The `<Portal/>` component renders its children into a new "subtree" outside of the current component hierarchy. * You can think of it as a declarative `appendChild()`, or jQuery's `$.fn.appendTo()`. * The children of the `<Portal/>` component will be appended to the `container` specified. */ var Portal = /*#__PURE__*/ function (_React$Component) { _inheritsLoose(Portal, _React$Component); function Portal() { return _React$Component.apply(this, arguments) || this; } var _proto = Portal.prototype; _proto.render = function render() { var _this = this; return this.props.children ? _react.default.createElement(_WaitForContainer.default, { container: this.props.container, onContainerResolved: this.props.onRendered }, function (container) { return _reactDom.default.createPortal(_this.props.children, container); }) : null; }; return Portal; }(_react.default.Component); Portal.displayName = 'Portal'; Portal.propTypes = { /** * A Node, Component instance, or function that returns either. The `container` will have the Portal children * appended to it. 
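 *
 * A usage sketch (illustrative only; `MyOverlayContent` is a hypothetical component):
 *
 * ```js
 * <Portal container={document.body}>
 *   <MyOverlayContent />
 * </Portal>
 * ```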
*/ container: _propTypes.default.oneOfType([_componentOrElement.default, _propTypes.default.func]), onRendered: _propTypes.default.func }; var _default = Portal; exports.default = _default; module.exports = exports.default; /***/ }), /* 49 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = void 0; var _propTypes = _interopRequireDefault(__webpack_require__(0)); var _componentOrElement = _interopRequireDefault(__webpack_require__(20)); var _inDOM = _interopRequireDefault(__webpack_require__(10)); var _ownerDocument = _interopRequireDefault(__webpack_require__(16)); var _react = _interopRequireDefault(__webpack_require__(1)); var _reactDom = _interopRequireDefault(__webpack_require__(6)); var _getContainer = _interopRequireDefault(__webpack_require__(50)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return self; } function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } var propTypes = { /** * A Node, Component instance, or function that returns either. The `container` will have the Portal children * appended to it. */ container: _propTypes.default.oneOfType([_componentOrElement.default, _propTypes.default.func]), onContainerResolved: _propTypes.default.func }; var WaitForContainer = /*#__PURE__*/ function (_React$Component) { _inheritsLoose(WaitForContainer, _React$Component); function WaitForContainer() { var _this; for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } _this = _React$Component.call.apply(_React$Component, [this].concat(args)) || this; if (!_inDOM.default) return _assertThisInitialized(_this); var container = _this.props.container; if (typeof container === 'function') container = container(); if (container && !_reactDom.default.findDOMNode(container)) { // The container is a React component that has not yet been rendered. // Don't set the container node yet. return _assertThisInitialized(_this); } _this.setContainer(container); return _this; } var _proto = WaitForContainer.prototype; _proto.UNSAFE_componentWillReceiveProps = function UNSAFE_componentWillReceiveProps(nextProps) { if (nextProps.container !== this.props.container) { this.setContainer(nextProps.container); } }; _proto.componentDidMount = function componentDidMount() { if (!this._container) { this.setContainer(this.props.container); this.forceUpdate(this.props.onContainerResolved); } else if (this.props.onContainerResolved) { this.props.onContainerResolved(); } }; _proto.componentWillUnmount = function componentWillUnmount() { this._container = null; }; _proto.setContainer = function setContainer(container) { this._container = (0, _getContainer.default)(container, (0, _ownerDocument.default)().body); }; _proto.render = function render() { return this._container ? 
this.props.children(this._container) : null; }; return WaitForContainer; }(_react.default.Component); WaitForContainer.propTypes = propTypes; var _default = WaitForContainer; exports.default = _default; module.exports = exports.default; /***/ }), /* 50 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = getContainer; var _reactDom = _interopRequireDefault(__webpack_require__(6)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function getContainer(container, defaultContainer) { if (container == null) return defaultContainer; container = typeof container === 'function' ? container() : container; return _reactDom.default.findDOMNode(container) || null; } module.exports = exports.default; /***/ }), /* 51 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = void 0; var _end = _interopRequireDefault(__webpack_require__(23)); exports.end = _end.default; var _properties = _interopRequireDefault(__webpack_require__(26)); exports.properties = _properties.default; var _default = { end: _end.default, properties: _properties.default }; exports.default = _default; /***/ }), /* 52 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = void 0; var _matches = _interopRequireDefault(__webpack_require__(72)); var _querySelectorAll = _interopRequireDefault(__webpack_require__(9)); var _react = _interopRequireDefault(__webpack_require__(1)); var _reactDom = _interopRequireDefault(__webpack_require__(6)); var _propTypes = _interopRequireDefault(__webpack_require__(0)); var _uncontrollable = _interopRequireDefault(__webpack_require__(7)); var Popper = _interopRequireWildcard(__webpack_require__(31)); var _DropdownContext = _interopRequireDefault(__webpack_require__(29)); var _DropdownMenu = _interopRequireDefault(__webpack_require__(35)); var _DropdownToggle = _interopRequireDefault(__webpack_require__(36)); function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = Object.defineProperty && Object.getOwnPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : {}; if (desc.get || desc.set) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } } newObj.default = obj; return newObj; } } function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } function _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; } function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } var propTypes = { /** * A render prop that returns the root dropdown element. The `props` * argument should spread through to an element containing _both_ the * menu and toggle in order to handle keyboard events for focus management. * * @type {Function ({ * props: { * onKeyDown: (SyntheticEvent) => void, * }, * }) => React.Element} */ children: _propTypes.default.func.isRequired, /** * Determines the direction and location of the Menu in relation to its Toggle. */ drop: _propTypes.default.oneOf(['up', 'left', 'right', 'down']), /** * Controls the focus behavior for when the Dropdown is opened. Set to * `true` to always focus the first menu item, `keyboard` to focus only when * navigating via the keyboard, or `false` to disable it completely * * The default behavior is `false` **unless** the Menu has a `role="menu"` * where it will default to `keyboard` to match the recommended [ARIA Authoring practices](https://www.w3.org/TR/wai-aria-practices-1.1/#menubutton). */ focusFirstItemOnShow: _propTypes.default.oneOf([false, true, 'keyboard']), /** * A CSS selector string that will return __focusable__ menu items. * Selectors should be relative to the menu component: * e.g. ` > li:not('.disabled')` */ itemSelector: _propTypes.default.string.isRequired, /** * Align the menu to the 'end' side of the placement side of the Dropdown toggle. The default placement is `top-start` or `bottom-start`. */ alignEnd: _propTypes.default.bool, /** * Whether or not the Dropdown is visible. * * @controllable onToggle */ show: _propTypes.default.bool, /** * A callback fired when the Dropdown wishes to change visibility. Called with the requested * `show` value, the DOM event, and the source that fired it: `'click'`,`'keydown'`,`'rootClose'`, or `'select'`. * * ```js * function( * isOpen: boolean, * event: SyntheticEvent, * ): void * ``` * * @controllable show */ onToggle: _propTypes.default.func }; var defaultProps = { itemSelector: '* > *' }; /** * `Dropdown` is a set of structural components for building accessible dropdown menus with close-on-click, * keyboard navigation, and correct focus handling. As with all the react-overlays * components, it's BYOS (bring your own styles). Dropdown is primarily * built from three base components that you should compose to build your Dropdowns. 
* * - `Dropdown`, which wraps the menu and toggle, and handles keyboard navigation * - `Dropdown.Toggle`, generally a button that triggers the menu opening * - `Dropdown.Menu`, the overlaid menu, positioned to the toggle with PopperJs */ var Dropdown = /*#__PURE__*/ function (_React$Component) { _inheritsLoose(Dropdown, _React$Component); Dropdown.getDerivedStateFromProps = function getDerivedStateFromProps(_ref, prevState) { var drop = _ref.drop, alignEnd = _ref.alignEnd, show = _ref.show; var lastShow = prevState.context.show; return { lastShow: lastShow, context: _extends({}, prevState.context, { drop: drop, show: show, alignEnd: alignEnd }) }; }; function Dropdown(props, context) { var _this; _this = _React$Component.call(this, props, context) || this; _this.handleClick = function (event) { _this.toggleOpen(event); }; _this.handleKeyDown = function (event) { var key = event.key, target = event.target; // Second only to https://github.com/twbs/bootstrap/blob/8cfbf6933b8a0146ac3fbc369f19e520bd1ebdac/js/src/dropdown.js#L400 // in inscrutability var isInput = /input|textarea/i.test(target.tagName); if (isInput && (key === ' ' || key !== 'Escape' && _this.menu.contains(target))) { return; } _this._lastSourceEvent = event.type; switch (key) { case 'ArrowUp': { var next = _this.getNextFocusedChild(target, -1); if (next && next.focus) next.focus(); event.preventDefault(); return; } case 'ArrowDown': event.preventDefault(); if (!_this.props.show) { _this.toggleOpen(event); } else { var _next = _this.getNextFocusedChild(target, 1); if (_next && _next.focus) _next.focus(); } return; case 'Escape': case 'Tab': _this.props.onToggle(false, event); break; default: } }; _this._focusInDropdown = false; _this.menu = null; _this.state = { context: { close: _this.handleClose, toggle: _this.handleClick, menuRef: function menuRef(r) { _this.menu = r; }, toggleRef: function toggleRef(r) { var toggleNode = r && _reactDom.default.findDOMNode(r); _this.setState(function (_ref2) { var context = _ref2.context; return { context: _extends({}, context, { toggleNode: toggleNode }) }; }); } } }; return _this; } var _proto = Dropdown.prototype; _proto.componentDidUpdate = function componentDidUpdate(prevProps) { var show = this.props.show; var prevOpen = prevProps.show; if (show && !prevOpen) { this.maybeFocusFirst(); } this._lastSourceEvent = null; if (!show && prevOpen) { // if focus hasn't already moved from the menu let's return it // to the toggle if (this._focusInDropdown) { this._focusInDropdown = false; this.focus(); } } }; _proto.getNextFocusedChild = function getNextFocusedChild(current, offset) { if (!this.menu) return null; var itemSelector = this.props.itemSelector; var items = (0, _querySelectorAll.default)(this.menu, itemSelector); var index = items.indexOf(current) + offset; index = Math.max(0, Math.min(index, items.length)); return items[index]; }; _proto.hasMenuRole = function hasMenuRole() { return this.menu && (0, _matches.default)(this.menu, '[role=menu]'); }; _proto.focus = function focus() { var toggleNode = this.state.context.toggleNode; if (toggleNode && toggleNode.focus) { toggleNode.focus(); } }; _proto.maybeFocusFirst = function maybeFocusFirst() { var type = this._lastSourceEvent; var focusFirstItemOnShow = this.props.focusFirstItemOnShow; if (focusFirstItemOnShow == null) { focusFirstItemOnShow = this.hasMenuRole() ? 
'keyboard' : false; } if (focusFirstItemOnShow === false || focusFirstItemOnShow === 'keyboard' && !/^key.+$/.test(type)) { return; } var itemSelector = this.props.itemSelector; var first = (0, _querySelectorAll.default)(this.menu, itemSelector)[0]; if (first && first.focus) first.focus(); }; _proto.toggleOpen = function toggleOpen(event) { var show = !this.props.show; this.props.onToggle(show, event); }; _proto.render = function render() { var _this$props = this.props, children = _this$props.children, props = _objectWithoutPropertiesLoose(_this$props, ["children"]); delete props.onToggle; if (this.menu && this.state.lastShow && !this.props.show) { this._focusInDropdown = this.menu.contains(document.activeElement); } return _react.default.createElement(_DropdownContext.default.Provider, { value: this.state.context }, _react.default.createElement(Popper.Manager, null, children({ props: { onKeyDown: this.handleKeyDown } }))); }; return Dropdown; }(_react.default.Component); Dropdown.displayName = 'ReactOverlaysDropdown'; Dropdown.propTypes = propTypes; Dropdown.defaultProps = defaultProps; var UncontrolledDropdown = (0, _uncontrollable.default)(Dropdown, { show: 'onToggle' }); UncontrolledDropdown.Menu = _DropdownMenu.default; UncontrolledDropdown.Toggle = _DropdownToggle.default; var _default = UncontrolledDropdown; exports.default = _default; module.exports = exports.default; /***/ }), /* 53 */ /***/ (function(module, exports) { function _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; } module.exports = _objectWithoutPropertiesLoose; /***/ }), /* 54 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; var _react = __webpack_require__(1); var _react2 = _interopRequireDefault(_react); var _implementation = __webpack_require__(73); var _implementation2 = _interopRequireDefault(_implementation); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } exports.default = _react2.default.createContext || _implementation2.default; module.exports = exports['default']; /***/ }), /* 55 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = mapContextToProps; var _react = _interopRequireDefault(__webpack_require__(1)); var _forwardRef = _interopRequireDefault(__webpack_require__(32)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } var getDisplayName = function getDisplayName(Component) { var name = typeof Component === 'string' ? Component : Component.name || Component.displayName; return name ? 
"ContextTransform(" + name + ")" : 'ContextTransform'; }; var ensureConsumer = function ensureConsumer(c) { return c.Consumer || c; }; function $mapContextToProps(_ref, Component) { var maybeArrayOfConsumers = _ref.consumers, mapToProps = _ref.mapToProps, displayName = _ref.displayName, _ref$forwardRefAs = _ref.forwardRefAs, forwardRefAs = _ref$forwardRefAs === void 0 ? 'ref' : _ref$forwardRefAs; var consumers = maybeArrayOfConsumers; if (!Array.isArray(maybeArrayOfConsumers)) { consumers = [maybeArrayOfConsumers]; } var SingleConsumer = ensureConsumer(consumers[0]); function singleRender(props, ref) { var _extends2; var propsWithRef = _extends((_extends2 = {}, _extends2[forwardRefAs] = ref, _extends2), props); return _react.default.createElement(SingleConsumer, null, function (value) { return _react.default.createElement(Component, _extends({}, propsWithRef, mapToProps(value, props))); }); } function multiRender(props, ref) { var _extends3; var propsWithRef = _extends((_extends3 = {}, _extends3[forwardRefAs] = ref, _extends3), props); return consumers.reduceRight(function (inner, Context) { return function () { for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } var Consumer = ensureConsumer(Context); return _react.default.createElement(Consumer, null, function (value) { return inner.apply(void 0, args.concat([value])); }); }; }, function () { for (var _len2 = arguments.length, contexts = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) { contexts[_key2] = arguments[_key2]; } return _react.default.createElement(Component, _extends({}, propsWithRef, mapToProps.apply(void 0, contexts.concat([props])))); })(); } var contextTransform = consumers.length === 1 ? singleRender : multiRender; return (0, _forwardRef.default)(contextTransform, { displayName: displayName || getDisplayName(Component) }); } function mapContextToProps(maybeOpts, mapToProps, Component) { if (arguments.length === 2) return $mapContextToProps(maybeOpts, mapToProps); return $mapContextToProps({ consumers: maybeOpts, mapToProps: mapToProps }, Component); } /***/ }), /* 56 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = void 0; var _activeElement = _interopRequireDefault(__webpack_require__(79)); var _contains = _interopRequireDefault(__webpack_require__(22)); var _inDOM = _interopRequireDefault(__webpack_require__(10)); var _listen = _interopRequireDefault(__webpack_require__(30)); var _propTypes = _interopRequireDefault(__webpack_require__(0)); var _componentOrElement = _interopRequireDefault(__webpack_require__(20)); var _elementType = _interopRequireDefault(__webpack_require__(28)); var _react = _interopRequireDefault(__webpack_require__(1)); var _reactDom = _interopRequireDefault(__webpack_require__(6)); var _ModalManager = _interopRequireDefault(__webpack_require__(37)); var _Portal = _interopRequireDefault(__webpack_require__(48)); var _getContainer = _interopRequireDefault(__webpack_require__(50)); var _ownerDocument = _interopRequireDefault(__webpack_require__(45)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } function _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; } function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } function _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return self; } var modalManager = new _ModalManager.default(); function omitProps(props, propTypes) { var keys = Object.keys(props); var newProps = {}; keys.map(function (prop) { if (!Object.prototype.hasOwnProperty.call(propTypes, prop)) { newProps[prop] = props[prop]; } }); return newProps; } /** * Love them or hate them, `<Modal/>` provides a solid foundation for creating dialogs, lightboxes, or whatever else. * The Modal component renders its `children` node in front of a backdrop component. * * The Modal offers a few helpful features over using just a `<Portal/>` component and some styles: * * - Manages dialog stacking when one-at-a-time just isn't enough. * - Creates a backdrop, for disabling interaction below the modal. * - It properly manages focus: moving to the modal content and keeping it there until the modal is closed. * - It disables scrolling of the page content while open. * - Adds the appropriate ARIA roles automatically. * - Easily pluggable animations via a `<Transition/>` component. * * Note that, in the same way the backdrop element prevents users from clicking or interacting * with the page content underneath the Modal, screen readers also need to be signaled not to * interact with page content while the Modal is open. To do this, we use a common technique of applying * the `aria-hidden='true'` attribute to the non-Modal elements in the Modal `container`. This means that for * a Modal to be truly modal, it should have a `container` that is _outside_ your app's * React hierarchy (such as the default: document.body). */ var Modal = /*#__PURE__*/ function (_React$Component) { _inheritsLoose(Modal, _React$Component); function Modal() { var _this; for (var _len = arguments.length, _args = new Array(_len), _key = 0; _key < _len; _key++) { _args[_key] = arguments[_key]; } _this = _React$Component.call.apply(_React$Component, [this].concat(_args)) || this; _this.state = { exited: !_this.props.show }; _this.onPortalRendered = function () { if (_this.props.onShow) { _this.props.onShow(); } // autofocus after onShow, to not trigger a focus event for previous // modals before this one is shown. 
_this.autoFocus(); }; _this.onShow = function () { var doc = (0, _ownerDocument.default)(_assertThisInitialized(_assertThisInitialized(_this))); var container = (0, _getContainer.default)(_this.props.container, doc.body); _this.props.manager.add(_assertThisInitialized(_assertThisInitialized(_this)), container, _this.props.containerClassName); _this.removeKeydownListener = (0, _listen.default)(doc, 'keydown', _this.handleDocumentKeyDown); _this.removeFocusListener = (0, _listen.default)(doc, 'focus', // the timeout is necessary b/c this will run before the new modal is mounted // and so steals focus from it function () { return setTimeout(_this.enforceFocus); }, true); }; _this.onHide = function () { _this.props.manager.remove(_assertThisInitialized(_assertThisInitialized(_this))); _this.removeKeydownListener(); _this.removeFocusListener(); if (_this.props.restoreFocus) { _this.restoreLastFocus(); } }; _this.setDialogRef = function (ref) { _this.dialog = ref; }; _this.setBackdropRef = function (ref) { _this.backdrop = ref && _reactDom.default.findDOMNode(ref); }; _this.handleHidden = function () { _this.setState({ exited: true }); _this.onHide(); if (_this.props.onExited) { var _this$props; (_this$props = _this.props).onExited.apply(_this$props, arguments); } }; _this.handleBackdropClick = function (e) { if (e.target !== e.currentTarget) { return; } if (_this.props.onBackdropClick) { _this.props.onBackdropClick(e); } if (_this.props.backdrop === true) { _this.props.onHide(); } }; _this.handleDocumentKeyDown = function (e) { if (_this.props.keyboard && e.keyCode === 27 && _this.isTopModal()) { if (_this.props.onEscapeKeyDown) { _this.props.onEscapeKeyDown(e); } _this.props.onHide(); } }; _this.enforceFocus = function () { if (!_this.props.enforceFocus || !_this._isMounted || !_this.isTopModal()) { return; } var currentActiveElement = (0, _activeElement.default)((0, _ownerDocument.default)(_assertThisInitialized(_assertThisInitialized(_this)))); if (_this.dialog && !(0, _contains.default)(_this.dialog, currentActiveElement)) { _this.dialog.focus(); } }; _this.renderBackdrop = function () { var _this$props2 = _this.props, renderBackdrop = _this$props2.renderBackdrop, Transition = _this$props2.backdropTransition; var backdrop = renderBackdrop({ ref: _this.setBackdropRef, onClick: _this.handleBackdropClick }); if (Transition) { backdrop = _react.default.createElement(Transition, { appear: true, in: _this.props.show }, backdrop); } return backdrop; }; return _this; } Modal.getDerivedStateFromProps = function getDerivedStateFromProps(nextProps) { if (nextProps.show) { return { exited: false }; } else if (!nextProps.transition) { // Otherwise let handleHidden take care of marking exited. return { exited: true }; } return null; }; var _proto = Modal.prototype; _proto.getSnapshotBeforeUpdate = function getSnapshotBeforeUpdate(prevProps) { if (_inDOM.default && !prevProps.show && this.props.show) { this.lastFocus = (0, _activeElement.default)(); } return null; }; _proto.componentDidMount = function componentDidMount() { this._isMounted = true; if (this.props.show) { this.onShow(); } }; _proto.componentDidUpdate = function componentDidUpdate(prevProps) { var transition = this.props.transition; if (prevProps.show && !this.props.show && !transition) { // Otherwise handleHidden will call this. 
this.onHide(); } else if (!prevProps.show && this.props.show) { this.onShow(); } }; _proto.componentWillUnmount = function componentWillUnmount() { var _this$props3 = this.props, show = _this$props3.show, transition = _this$props3.transition; this._isMounted = false; if (show || transition && !this.state.exited) { this.onHide(); } }; _proto.autoFocus = function autoFocus() { if (!this.props.autoFocus) return; var currentActiveElement = (0, _activeElement.default)((0, _ownerDocument.default)(this)); if (this.dialog && !(0, _contains.default)(this.dialog, currentActiveElement)) { this.lastFocus = currentActiveElement; this.dialog.focus(); } }; _proto.restoreLastFocus = function restoreLastFocus() { // Support: <=IE11 doesn't support `focus()` on svg elements (RB: #917) if (this.lastFocus && this.lastFocus.focus) { this.lastFocus.focus(); this.lastFocus = null; } }; _proto.isTopModal = function isTopModal() { return this.props.manager.isTopModal(this); }; _proto.render = function render() { var _this$props4 = this.props, show = _this$props4.show, container = _this$props4.container, children = _this$props4.children, renderDialog = _this$props4.renderDialog, _this$props4$role = _this$props4.role, role = _this$props4$role === void 0 ? 'dialog' : _this$props4$role, Transition = _this$props4.transition, backdrop = _this$props4.backdrop, className = _this$props4.className, style = _this$props4.style, onExit = _this$props4.onExit, onExiting = _this$props4.onExiting, onEnter = _this$props4.onEnter, onEntering = _this$props4.onEntering, onEntered = _this$props4.onEntered, props = _objectWithoutPropertiesLoose(_this$props4, ["show", "container", "children", "renderDialog", "role", "transition", "backdrop", "className", "style", "onExit", "onExiting", "onEnter", "onEntering", "onEntered"]); if (!(show || Transition && !this.state.exited)) { return null; } var dialogProps = _extends({ role: role, ref: this.setDialogRef, // apparently only works on the dialog role element 'aria-modal': role === 'dialog' ? true : undefined }, omitProps(props, Modal.propTypes), { style: style, className: className, tabIndex: '-1' }); var dialog = renderDialog ? renderDialog(dialogProps) : _react.default.createElement("div", dialogProps, _react.default.cloneElement(children, { role: 'document' })); if (Transition) { dialog = _react.default.createElement(Transition, { appear: true, unmountOnExit: true, in: show, onExit: onExit, onExiting: onExiting, onExited: this.handleHidden, onEnter: onEnter, onEntering: onEntering, onEntered: onEntered }, dialog); } return _react.default.createElement(_Portal.default, { container: container, onRendered: this.onPortalRendered }, _react.default.createElement(_react.default.Fragment, null, backdrop && this.renderBackdrop(), dialog)); }; return Modal; }(_react.default.Component); Modal.propTypes = { /** * Set the visibility of the Modal */ show: _propTypes.default.bool, /** * A Node, Component instance, or function that returns either. The Modal is appended to its container element. * * For the sake of assistive technologies, the container should usually be the document body, so that the rest of the * page content can be placed behind a virtual backdrop as well as a visual one. */ container: _propTypes.default.oneOfType([_componentOrElement.default, _propTypes.default.func]), /** * A callback fired when the Modal is opening. */ onShow: _propTypes.default.func, /** * A callback fired when either the backdrop is clicked, or the escape key is pressed. 
* * The `onHide` callback only signals intent from the Modal, * you must actually set the `show` prop to `false` for the Modal to close. */ onHide: _propTypes.default.func, /** * Include a backdrop component. */ backdrop: _propTypes.default.oneOfType([_propTypes.default.bool, _propTypes.default.oneOf(['static'])]), /** * A function that returns the dialog component. Useful for custom * rendering. **Note:** the component should make sure to apply the provided ref. * * ```js * renderDialog={props => <MyDialog {...props} />} * ``` */ renderDialog: _propTypes.default.func, /** * A function that returns a backdrop component. Useful for custom * backdrop rendering. * * ```js * renderBackdrop={props => <MyBackdrop {...props} />} * ``` */ renderBackdrop: _propTypes.default.func, /** * A callback fired when the escape key, if specified in `keyboard`, is pressed. */ onEscapeKeyDown: _propTypes.default.func, /** * A callback fired when the backdrop, if specified, is clicked. */ onBackdropClick: _propTypes.default.func, /** * A CSS class or set of classes applied to the modal container when the modal is open, * and removed when it is closed. */ containerClassName: _propTypes.default.string, /** * Close the modal when the escape key is pressed. */ keyboard: _propTypes.default.bool, /** * A `react-transition-group@2.0.0` `<Transition/>` component used * to control animations for the dialog component. */ transition: _elementType.default, /** * A `react-transition-group@2.0.0` `<Transition/>` component used * to control animations for the backdrop components. */ backdropTransition: _elementType.default, /** * When `true` the modal will automatically shift focus to itself when it opens, and * return it to the last focused element when it closes. This also * works correctly with any Modal children that have the `autoFocus` prop. * * Generally this should never be set to `false` as it makes the Modal less * accessible to assistive technologies, like screen readers. */ autoFocus: _propTypes.default.bool, /** * When `true` the modal will prevent focus from leaving the Modal while open. * * Generally this should never be set to `false` as it makes the Modal less * accessible to assistive technologies, like screen readers. */ enforceFocus: _propTypes.default.bool, /** * When `true` the modal will restore focus to the previously focused element once * the modal is hidden. */ restoreFocus: _propTypes.default.bool, /** * Callback fired before the Modal transitions in */ onEnter: _propTypes.default.func, /** * Callback fired as the Modal begins to transition in */ onEntering: _propTypes.default.func, /** * Callback fired after the Modal finishes transitioning in */ onEntered: _propTypes.default.func, /** * Callback fired right before the Modal transitions out */ onExit: _propTypes.default.func, /** * Callback fired as the Modal begins to transition out */ onExiting: _propTypes.default.func, /** * Callback fired after the Modal finishes transitioning out */ onExited: _propTypes.default.func, /** * A ModalManager instance used to track and manage the state of open * Modals.
Useful when customizing how modals interact within a container */ manager: _propTypes.default.object.isRequired }; Modal.defaultProps = { show: false, role: 'dialog', backdrop: true, keyboard: true, autoFocus: true, enforceFocus: true, restoreFocus: true, onHide: function onHide() {}, manager: modalManager, renderBackdrop: function renderBackdrop(props) { return _react.default.createElement("div", props); } }; Modal.Manager = _ModalManager.default; var _default = Modal; exports.default = _default; module.exports = exports.default; /***/ }), /* 57 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = void 0; var _propTypes = _interopRequireDefault(__webpack_require__(0)); var _elementType = _interopRequireDefault(__webpack_require__(28)); var _componentOrElement = _interopRequireDefault(__webpack_require__(20)); var _react = _interopRequireDefault(__webpack_require__(1)); var _reactDom = _interopRequireDefault(__webpack_require__(6)); var _Portal = _interopRequireDefault(__webpack_require__(48)); var _RootCloseWrapper = _interopRequireDefault(__webpack_require__(42)); var _reactPopper = __webpack_require__(31); var _forwardRef = _interopRequireDefault(__webpack_require__(46)); var _WaitForContainer = _interopRequireDefault(__webpack_require__(49)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } function _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; } function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } function _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return self; } /** * Built on top of `<Popper/>` and `<Portal/>`, the overlay component is * great for custom tooltip overlays. */ var Overlay = /*#__PURE__*/ function (_React$Component) { _inheritsLoose(Overlay, _React$Component); function Overlay(props, context) { var _this; _this = _React$Component.call(this, props, context) || this; _this.handleHidden = function () { _this.setState({ exited: true }); if (_this.props.onExited) { var _this$props; (_this$props = _this.props).onExited.apply(_this$props, arguments); } }; _this.state = { exited: !props.show }; _this.onHiddenListener = _this.handleHidden.bind(_assertThisInitialized(_assertThisInitialized(_this))); _this._lastTarget = null; return _this; } Overlay.getDerivedStateFromProps = function getDerivedStateFromProps(nextProps) { if (nextProps.show) { return { exited: false }; } else if (!nextProps.transition) { // Otherwise let handleHidden take care of marking exited.
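// Usage sketch for the Overlay component defined in this module. It exposes a
// render-prop API: the child function receives Popper state plus a `props`
// bag (ref and style) that must be spread onto the positioned element.
// `open`, `setOpen`, and `triggerRef` are hypothetical:
//
//   <Overlay show={open} placement="bottom" rootClose
//            onHide={() => setOpen(false)} target={() => triggerRef.current}>
//     {({ props, placement }) => (
//       <div ref={props.ref} style={props.style}>Tooltip on {placement}</div>
//     )}
//   </Overlay>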
return { exited: true }; } return null; }; var _proto = Overlay.prototype; _proto.componentDidMount = function componentDidMount() { this.setState({ target: this.getTarget() }); }; _proto.componentDidUpdate = function componentDidUpdate(prevProps) { if (this.props === prevProps) return; var target = this.getTarget(); if (target !== this.state.target) { this.setState({ target: target }); } }; _proto.getTarget = function getTarget() { var target = this.props.target; target = typeof target === 'function' ? target() : target; return target && _reactDom.default.findDOMNode(target) || null; }; _proto.render = function render() { var _this2 = this; var _this$props2 = this.props, _0 = _this$props2.target, container = _this$props2.container, containerPadding = _this$props2.containerPadding, placement = _this$props2.placement, rootClose = _this$props2.rootClose, children = _this$props2.children, flip = _this$props2.flip, _this$props2$popperCo = _this$props2.popperConfig, popperConfig = _this$props2$popperCo === void 0 ? {} : _this$props2$popperCo, Transition = _this$props2.transition, props = _objectWithoutPropertiesLoose(_this$props2, ["target", "container", "containerPadding", "placement", "rootClose", "children", "flip", "popperConfig", "transition"]); var target = this.state.target; // Don't un-render the overlay while it's transitioning out.
var mountOverlay = props.show || Transition && !this.state.exited; if (!mountOverlay) { // Don't bother showing anything if we don't have to.
return null; } var child = children; var _popperConfig$modifie = popperConfig.modifiers, modifiers = _popperConfig$modifie === void 0 ? {} : _popperConfig$modifie; var popperProps = _extends({}, popperConfig, { placement: placement, referenceElement: target, enableEvents: props.show, modifiers: _extends({}, modifiers, { preventOverflow: _extends({ padding: containerPadding || 5 }, modifiers.preventOverflow), flip: _extends({ enabled: !!flip }, modifiers.flip) }) }); child = _react.default.createElement(_reactPopper.Popper, popperProps, function (_ref) { var arrowProps = _ref.arrowProps, style = _ref.style, ref = _ref.ref, popper = _objectWithoutPropertiesLoose(_ref, ["arrowProps", "style", "ref"]); _this2.popper = popper; var innerChild = _this2.props.children(_extends({}, popper, { // popper doesn't set the initial placement
placement: popper.placement || placement, show: props.show, arrowProps: arrowProps, props: { ref: ref, style: style } })); if (Transition) { var onExit = props.onExit, onExiting = props.onExiting, onEnter = props.onEnter, onEntering = props.onEntering, onEntered = props.onEntered; innerChild = _react.default.createElement(Transition, { in: props.show, appear: true, onExit: onExit, onExiting: onExiting, onExited: _this2.onHiddenListener, onEnter: onEnter, onEntering: onEntering, onEntered: onEntered }, innerChild); } return innerChild; }); if (rootClose) { child = _react.default.createElement(_RootCloseWrapper.default, { onRootClose: props.onHide, event: props.rootCloseEvent, disabled: props.rootCloseDisabled }, child); } return _react.default.createElement(_Portal.default, { container: container }, child); }; return Overlay; }(_react.default.Component); Overlay.propTypes = _extends({}, _Portal.default.propTypes, { /** * Set the visibility of the Overlay */ show: _propTypes.default.bool, /** Specify where the overlay element is positioned in relation to the target element */ placement: _propTypes.default.oneOf(_reactPopper.placements), /** * A Node, Component instance, or function that
returns either. The `container` will have the Portal children * appended to it. */ container: _propTypes.default.oneOfType([_componentOrElement.default, _propTypes.default.func]), /** * Enables the Popper.js `flip` modifier, allowing the Overlay to * automatically adjust its placement in case of overlap with the viewport or toggle. * Refer to the [flip docs](https://popper.js.org/popper-documentation.html#modifiers..flip.enabled) for more info */ flip: _propTypes.default.bool, /** * A render prop that returns an element to overlay and position. See * the [react-popper documentation](https://github.com/FezVrasta/react-popper#children) for more info. * * @type {Function ({ * show: boolean, * placement: Placement, * outOfBoundaries: ?boolean, * scheduleUpdate: () => void, * props: { * ref: (?HTMLElement) => void, * style: { [string]: string | number }, * aria-labelledby: ?string * }, * arrowProps: { * ref: (?HTMLElement) => void, * style: { [string]: string | number }, * }, * }) => React.Element} */ children: _propTypes.default.func.isRequired, /** * A set of popper options and props passed directly to react-popper's Popper component. */ popperConfig: _propTypes.default.object, /** * Specify whether the overlay should trigger `onHide` when the user clicks outside the overlay */ rootClose: _propTypes.default.bool, /** * Specify the event for toggling the overlay */ rootCloseEvent: _RootCloseWrapper.default.propTypes.event, /** * Specify `disabled` to disable the RootCloseWrapper */ rootCloseDisabled: _RootCloseWrapper.default.propTypes.disabled, /** * A callback fired by the Overlay when it wishes to be hidden. * * __required__ when `rootClose` is `true`. * * @type func */ onHide: function onHide(props) { var propType = _propTypes.default.func; if (props.rootClose) { propType = propType.isRequired; } for (var _len = arguments.length, args = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) { args[_key - 1] = arguments[_key]; } return propType.apply(void 0, [props].concat(args)); }, /** * A `react-transition-group@2.0.0` `<Transition/>` component * used to animate the overlay as it changes visibility. */ transition: _elementType.default, /** * Callback fired before the Overlay transitions in */ onEnter: _propTypes.default.func, /** * Callback fired as the Overlay begins to transition in */ onEntering: _propTypes.default.func, /** * Callback fired after the Overlay finishes transitioning in */ onEntered: _propTypes.default.func, /** * Callback fired right before the Overlay transitions out */ onExit: _propTypes.default.func, /** * Callback fired as the Overlay begins to transition out */ onExiting: _propTypes.default.func, /** * Callback fired after the Overlay finishes transitioning out */ onExited: _propTypes.default.func }); var _default = (0, _forwardRef.default)(function (props, ref) { return (// eslint-disable-next-line react/prop-types
_react.default.createElement(_WaitForContainer.default, { container: props.container }, function (container) { return _react.default.createElement(Overlay, _extends({}, props, { ref: ref, container: container })); }) ); }, { displayName: 'withContainer(Overlay)' }); exports.default = _default; module.exports = exports.default; /***/ }), /* 58 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; /** * Copyright (c) 2013-present, Facebook, Inc. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree.
*/ var ReactPropTypesSecret = __webpack_require__(59); function emptyFunction() {} function emptyFunctionWithReset() {} emptyFunctionWithReset.resetWarningCache = emptyFunction; module.exports = function () { function shim(props, propName, componentName, location, propFullName, secret) { if (secret === ReactPropTypesSecret) { // It is still safe when called from React. return; } var err = new Error('Calling PropTypes validators directly is not supported by the `prop-types` package. ' + 'Use PropTypes.checkPropTypes() to call them. ' + 'Read more at http://fb.me/use-check-prop-types'); err.name = 'Invariant Violation'; throw err; } ; shim.isRequired = shim; function getShim() { return shim; } ; // Important! // Keep this list in sync with production version in `./factoryWithTypeCheckers.js`. var ReactPropTypes = { array: shim, bool: shim, func: shim, number: shim, object: shim, string: shim, symbol: shim, any: shim, arrayOf: getShim, element: shim, elementType: shim, instanceOf: getShim, node: shim, objectOf: getShim, oneOf: getShim, oneOfType: getShim, shape: getShim, exact: getShim, checkPropTypes: emptyFunctionWithReset, resetWarningCache: emptyFunction }; ReactPropTypes.PropTypes = ReactPropTypes; return ReactPropTypes; }; /***/ }), /* 59 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; /** * Copyright (c) 2013-present, Facebook, Inc. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ var ReactPropTypesSecret = 'SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED'; module.exports = ReactPropTypesSecret; /***/ }), /* 60 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = hyphenateStyleName; var _hyphenate = _interopRequireDefault(__webpack_require__(61)); /** * Copyright 2013-2014, Facebook, Inc. * All rights reserved. * https://github.com/facebook/react/blob/2aeb8a2a6beb00617a4217f7f8284924fa2ad819/src/vendor/core/hyphenateStyleName.js */ var msPattern = /^ms-/; function hyphenateStyleName(string) { return (0, _hyphenate.default)(string).replace(msPattern, '-ms-'); } module.exports = exports["default"]; /***/ }), /* 61 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = hyphenate; var rUpper = /([A-Z])/g; function hyphenate(string) { return string.replace(rUpper, '-$1').toLowerCase(); } module.exports = exports["default"]; /***/ }), /* 62 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = _getComputedStyle; var _camelizeStyle = _interopRequireDefault(__webpack_require__(40)); var rposition = /^(top|right|bottom|left)$/; var rnumnonpx = /^([+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|))(?!px)[a-z%]+$/i; function _getComputedStyle(node) { if (!node) throw new TypeError('No Element passed to `getComputedStyle()`'); var doc = node.ownerDocument; return 'defaultView' in doc ? doc.defaultView.opener ? 
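// Worked examples for the two style-name helpers above: `hyphenate` inserts a
// dash before each capital letter and lowercases the result, and
// `hyphenateStyleName` additionally rewrites the `ms-` vendor prefix:
//
//   hyphenate('backgroundColor')      // -> 'background-color'
//   hyphenateStyleName('msTransform') // -> '-ms-transform'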
node.ownerDocument.defaultView.getComputedStyle(node, null) : window.getComputedStyle(node, null) : { //ie 8 "magic" from: https://github.com/jquery/jquery/blob/1.11-stable/src/css/curCSS.js#L72 getPropertyValue: function getPropertyValue(prop) { var style = node.style; prop = (0, _camelizeStyle.default)(prop); if (prop == 'float') prop = 'styleFloat'; var current = node.currentStyle[prop] || null; if (current == null && style && style[prop]) current = style[prop]; if (rnumnonpx.test(current) && !rposition.test(prop)) { // Remember the original values var left = style.left; var runStyle = node.runtimeStyle; var rsLeft = runStyle && runStyle.left; // Put in the new values to get a computed value out if (rsLeft) runStyle.left = node.currentStyle.left; style.left = prop === 'fontSize' ? '1em' : current; current = style.pixelLeft + 'px'; // Revert the changed values style.left = left; if (rsLeft) runStyle.left = rsLeft; } return current; } }; } module.exports = exports["default"]; /***/ }), /* 63 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = removeStyle; function removeStyle(node, key) { return 'removeProperty' in node.style ? node.style.removeProperty(key) : node.style.removeAttribute(key); } module.exports = exports["default"]; /***/ }), /* 64 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = isTransform; var supportedTransforms = /^((translate|rotate|scale)(X|Y|Z|3d)?|matrix(3d)?|perspective|skew(X|Y)?)$/i; function isTransform(property) { return !!(property && supportedTransforms.test(property)); } module.exports = exports["default"]; /***/ }), /* 65 */ /***/ (function(module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_require__.r(__webpack_exports__); /* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "polyfill", function() { return polyfill; }); /** * Copyright (c) 2013-present, Facebook, Inc. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ function componentWillMount() { // Call this.constructor.gDSFP to support sub-classes. var state = this.constructor.getDerivedStateFromProps(this.props, this.state); if (state !== null && state !== undefined) { this.setState(state); } } function componentWillReceiveProps(nextProps) { // Call this.constructor.gDSFP to support sub-classes. // Use the setState() updater to ensure state isn't stale in certain edge cases. function updater(prevState) { var state = this.constructor.getDerivedStateFromProps(nextProps, prevState); return state !== null && state !== undefined ? state : null; } // Binding "this" is important for shallow renderer support. this.setState(updater.bind(this)); } function componentWillUpdate(nextProps, nextState) { try { var prevProps = this.props; var prevState = this.state; this.props = nextProps; this.state = nextState; this.__reactInternalSnapshotFlag = true; this.__reactInternalSnapshot = this.getSnapshotBeforeUpdate(prevProps, prevState); } finally { this.props = prevProps; this.state = prevState; } } // React may warn about cWM/cWRP/cWU methods being deprecated. // Add a flag to suppress these warnings for this special case. 
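// Usage sketch for the react-lifecycles-compat polyfill implemented in this
// module: wrap a class that relies on the new static lifecycle so it also
// works on React < 16.3. `MyComponent` is a hypothetical example:
//
//   class MyComponent extends React.Component {
//     static getDerivedStateFromProps(props, state) { return null; }
//     render() { return null; }
//   }
//   polyfill(MyComponent); // installs componentWillMount/ReceiveProps shims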
componentWillMount.__suppressDeprecationWarning = true; componentWillReceiveProps.__suppressDeprecationWarning = true; componentWillUpdate.__suppressDeprecationWarning = true; function polyfill(Component) { var prototype = Component.prototype; if (!prototype || !prototype.isReactComponent) { throw new Error('Can only polyfill class components'); } if (typeof Component.getDerivedStateFromProps !== 'function' && typeof prototype.getSnapshotBeforeUpdate !== 'function') { return Component; } // If new component APIs are defined, "unsafe" lifecycles won't be called. // Error if any of these lifecycles are present, // Because they would work differently between older and newer (16.3+) versions of React. var foundWillMountName = null; var foundWillReceivePropsName = null; var foundWillUpdateName = null; if (typeof prototype.componentWillMount === 'function') { foundWillMountName = 'componentWillMount'; } else if (typeof prototype.UNSAFE_componentWillMount === 'function') { foundWillMountName = 'UNSAFE_componentWillMount'; } if (typeof prototype.componentWillReceiveProps === 'function') { foundWillReceivePropsName = 'componentWillReceiveProps'; } else if (typeof prototype.UNSAFE_componentWillReceiveProps === 'function') { foundWillReceivePropsName = 'UNSAFE_componentWillReceiveProps'; } if (typeof prototype.componentWillUpdate === 'function') { foundWillUpdateName = 'componentWillUpdate'; } else if (typeof prototype.UNSAFE_componentWillUpdate === 'function') { foundWillUpdateName = 'UNSAFE_componentWillUpdate'; } if (foundWillMountName !== null || foundWillReceivePropsName !== null || foundWillUpdateName !== null) { var componentName = Component.displayName || Component.name; var newApiName = typeof Component.getDerivedStateFromProps === 'function' ? 'getDerivedStateFromProps()' : 'getSnapshotBeforeUpdate()'; throw Error('Unsafe legacy lifecycles will not be called for components using new component APIs.\n\n' + componentName + ' uses ' + newApiName + ' but also contains the following legacy lifecycles:' + (foundWillMountName !== null ? '\n ' + foundWillMountName : '') + (foundWillReceivePropsName !== null ? '\n ' + foundWillReceivePropsName : '') + (foundWillUpdateName !== null ? '\n ' + foundWillUpdateName : '') + '\n\nThe above lifecycles should be removed. Learn more about this warning here:\n' + 'https://fb.me/react-async-component-lifecycle-hooks'); } // React <= 16.2 does not support static getDerivedStateFromProps. // As a workaround, use cWM and cWRP to invoke the new static lifecycle. // Newer versions of React will ignore these lifecycles if gDSFP exists. if (typeof Component.getDerivedStateFromProps === 'function') { prototype.componentWillMount = componentWillMount; prototype.componentWillReceiveProps = componentWillReceiveProps; } // React <= 16.2 does not support getSnapshotBeforeUpdate. // As a workaround, use cWU to invoke the new lifecycle. // Newer versions of React will ignore that lifecycle if gSBU exists. if (typeof prototype.getSnapshotBeforeUpdate === 'function') { if (typeof prototype.componentDidUpdate !== 'function') { throw new Error('Cannot polyfill getSnapshotBeforeUpdate() for components that do not define componentDidUpdate() on the prototype'); } prototype.componentWillUpdate = componentWillUpdate; var componentDidUpdate = prototype.componentDidUpdate; prototype.componentDidUpdate = function componentDidUpdatePolyfill(prevProps, prevState, maybeSnapshot) { // 16.3+ will not execute our will-update method; // It will pass a snapshot value to did-update though. 
// Older versions will require our polyfilled will-update value. // We need to handle both cases, but can't just check for the presence of "maybeSnapshot", // Because for <= 15.x versions this might be a "prevContext" object. // We also can't just check "__reactInternalSnapshot", // Because get-snapshot might return a falsy value. // So check for the explicit __reactInternalSnapshotFlag flag to determine behavior. var snapshot = this.__reactInternalSnapshotFlag ? this.__reactInternalSnapshot : maybeSnapshot; componentDidUpdate.call(this, prevProps, prevState, snapshot); }; } return Component; } /***/ }), /* 66 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.classNamesShape = exports.timeoutsShape = void 0; var _propTypes = _interopRequireDefault(__webpack_require__(0)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } var timeoutsShape = false ? undefined : null; exports.timeoutsShape = timeoutsShape; var classNamesShape = false ? undefined : null; exports.classNamesShape = classNamesShape; /***/ }), /* 67 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.default = deprecated; var _warning = __webpack_require__(68); var _warning2 = _interopRequireDefault(_warning); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } var warned = {}; function deprecated(validator, reason) { return function validate(props, propName, componentName, location, propFullName) { var componentNameSafe = componentName || '<>'; var propFullNameSafe = propFullName || propName; if (props[propName] != null) { var messageKey = componentName + '.' + propName; (0, _warning2.default)(warned[messageKey], 'The ' + location + ' `' + propFullNameSafe + '` of ' + ('`' + componentNameSafe + '` is deprecated. ' + reason + '.')); warned[messageKey] = true; } for (var _len = arguments.length, args = Array(_len > 5 ? _len - 5 : 0), _key = 5; _key < _len; _key++) { args[_key - 5] = arguments[_key]; } return validator.apply(undefined, [props, propName, componentName, location, propFullName].concat(args)); }; } /* eslint-disable no-underscore-dangle */ function _resetWarned() { warned = {}; } deprecated._resetWarned = _resetWarned; /* eslint-enable no-underscore-dangle */ module.exports = exports['default']; /***/ }), /* 68 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; /** * Copyright 2014-2015, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. */ /** * Similar to invariant but only logs a warning if the condition is not met. * This can be used to log issues in development environments in critical * paths. Removing the logging code for production environments will keep the * same logic and follow the same code paths. */ var warning = function () {}; if (false) {} module.exports = warning; /***/ }), /* 69 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; if (true) { module.exports = __webpack_require__(70); } else {} /***/ }), /* 70 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; /** @license React v16.8.6 * react-is.production.min.js * * Copyright (c) Facebook, Inc. and its affiliates. 
* * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ Object.defineProperty(exports, "__esModule", { value: !0 }); var b = "function" === typeof Symbol && Symbol.for, c = b ? Symbol.for("react.element") : 60103, d = b ? Symbol.for("react.portal") : 60106, e = b ? Symbol.for("react.fragment") : 60107, f = b ? Symbol.for("react.strict_mode") : 60108, g = b ? Symbol.for("react.profiler") : 60114, h = b ? Symbol.for("react.provider") : 60109, k = b ? Symbol.for("react.context") : 60110, l = b ? Symbol.for("react.async_mode") : 60111, m = b ? Symbol.for("react.concurrent_mode") : 60111, n = b ? Symbol.for("react.forward_ref") : 60112, p = b ? Symbol.for("react.suspense") : 60113, q = b ? Symbol.for("react.memo") : 60115, r = b ? Symbol.for("react.lazy") : 60116; function t(a) { if ("object" === typeof a && null !== a) { var u = a.$$typeof; switch (u) { case c: switch (a = a.type, a) { case l: case m: case e: case g: case f: case p: return a; default: switch (a = a && a.$$typeof, a) { case k: case n: case h: return a; default: return u; } } case r: case q: case d: return u; } } } function v(a) { return t(a) === m; } exports.typeOf = t; exports.AsyncMode = l; exports.ConcurrentMode = m; exports.ContextConsumer = k; exports.ContextProvider = h; exports.Element = c; exports.ForwardRef = n; exports.Fragment = e; exports.Lazy = r; exports.Memo = q; exports.Portal = d; exports.Profiler = g; exports.StrictMode = f; exports.Suspense = p; exports.isValidElementType = function (a) { return "string" === typeof a || "function" === typeof a || a === e || a === m || a === g || a === f || a === p || "object" === typeof a && null !== a && (a.$$typeof === r || a.$$typeof === q || a.$$typeof === h || a.$$typeof === k || a.$$typeof === n); }; exports.isAsyncMode = function (a) { return v(a) || t(a) === l; }; exports.isConcurrentMode = v; exports.isContextConsumer = function (a) { return t(a) === k; }; exports.isContextProvider = function (a) { return t(a) === h; }; exports.isElement = function (a) { return "object" === typeof a && null !== a && a.$$typeof === c; }; exports.isForwardRef = function (a) { return t(a) === n; }; exports.isFragment = function (a) { return t(a) === e; }; exports.isLazy = function (a) { return t(a) === r; }; exports.isMemo = function (a) { return t(a) === q; }; exports.isPortal = function (a) { return t(a) === d; }; exports.isProfiler = function (a) { return t(a) === g; }; exports.isStrictMode = function (a) { return t(a) === f; }; exports.isSuspense = function (a) { return t(a) === p; }; /***/ }), /* 71 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = void 0; var _react = __webpack_require__(1); function useCommittedRef(value) { var ref = (0, _react.useRef)(value); (0, _react.useEffect)(function () { ref.current = value; }, [value]); return ref; } var _default = useCommittedRef; exports.default = _default; /***/ }), /* 72 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = matches; var _inDOM = _interopRequireDefault(__webpack_require__(10)); var _querySelectorAll = _interopRequireDefault(__webpack_require__(9)); var matchesCache; function matches(node, selector) { if (!matchesCache && _inDOM.default) { var body = document.body; var nativeMatch = body.matches || body.matchesSelector || body.webkitMatchesSelector || 
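// Sketch for the useCommittedRef hook above: it keeps a ref pointed at the
// latest value so long-lived callbacks can read fresh data without
// re-subscribing on every render. `node` and `handler` are hypothetical:
//
//   const saved = useCommittedRef(handler);
//   React.useEffect(function () {
//     var listener = function (e) { return saved.current(e); };
//     node.addEventListener('click', listener);
//     return function () { return node.removeEventListener('click', listener); };
//   }, [node]);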
body.mozMatchesSelector || body.msMatchesSelector; matchesCache = nativeMatch ? function (node, selector) { return nativeMatch.call(node, selector); } : ie8MatchesSelector; } return matchesCache ? matchesCache(node, selector) : null; } function ie8MatchesSelector(node, selector) { var matches = (0, _querySelectorAll.default)(node.document || node.ownerDocument, selector), i = 0; while (matches[i] && matches[i] !== node) { i++; } return !!matches[i]; } module.exports = exports["default"]; /***/ }), /* 73 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; var _react = __webpack_require__(1); var _react2 = _interopRequireDefault(_react); var _propTypes = __webpack_require__(0); var _propTypes2 = _interopRequireDefault(_propTypes); var _gud = __webpack_require__(74); var _gud2 = _interopRequireDefault(_gud); var _warning = __webpack_require__(75); var _warning2 = _interopRequireDefault(_warning); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; } function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } var MAX_SIGNED_31_BIT_INT = 1073741823; // Inlined Object.is polyfill. // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/is function objectIs(x, y) { if (x === y) { return x !== 0 || 1 / x === 1 / y; } else { return x !== x && y !== y; } } function createEventEmitter(value) { var handlers = []; return { on: function on(handler) { handlers.push(handler); }, off: function off(handler) { handlers = handlers.filter(function (h) { return h !== handler; }); }, get: function get() { return value; }, set: function set(newValue, changedBits) { value = newValue; handlers.forEach(function (handler) { return handler(value, changedBits); }); } }; } function onlyChild(children) { return Array.isArray(children) ? 
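// Edge cases motivating the objectIs polyfill above; a plain === check gets
// both of these wrong for change detection:
//
//   objectIs(NaN, NaN) // -> true  (NaN is considered equal to itself)
//   objectIs(0, -0)    // -> false (+0 and -0 are distinguished)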
children[0] : children; } function createReactContext(defaultValue, calculateChangedBits) { var _Provider$childContex, _Consumer$contextType; var contextProp = '__create-react-context-' + (0, _gud2.default)() + '__'; var Provider = function (_Component) { _inherits(Provider, _Component); function Provider() { var _temp, _this, _ret; _classCallCheck(this, Provider); for (var _len = arguments.length, args = Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } return _ret = (_temp = (_this = _possibleConstructorReturn(this, _Component.call.apply(_Component, [this].concat(args))), _this), _this.emitter = createEventEmitter(_this.props.value), _temp), _possibleConstructorReturn(_this, _ret); } Provider.prototype.getChildContext = function getChildContext() { var _ref; return _ref = {}, _ref[contextProp] = this.emitter, _ref; }; Provider.prototype.componentWillReceiveProps = function componentWillReceiveProps(nextProps) { if (this.props.value !== nextProps.value) { var oldValue = this.props.value; var newValue = nextProps.value; var changedBits = void 0; if (objectIs(oldValue, newValue)) { changedBits = 0; // No change } else { changedBits = typeof calculateChangedBits === 'function' ? calculateChangedBits(oldValue, newValue) : MAX_SIGNED_31_BIT_INT; if (false) {} changedBits |= 0; if (changedBits !== 0) { this.emitter.set(nextProps.value, changedBits); } } } }; Provider.prototype.render = function render() { return this.props.children; }; return Provider; }(_react.Component); Provider.childContextTypes = (_Provider$childContex = {}, _Provider$childContex[contextProp] = _propTypes2.default.object.isRequired, _Provider$childContex); var Consumer = function (_Component2) { _inherits(Consumer, _Component2); function Consumer() { var _temp2, _this2, _ret2; _classCallCheck(this, Consumer); for (var _len2 = arguments.length, args = Array(_len2), _key2 = 0; _key2 < _len2; _key2++) { args[_key2] = arguments[_key2]; } return _ret2 = (_temp2 = (_this2 = _possibleConstructorReturn(this, _Component2.call.apply(_Component2, [this].concat(args))), _this2), _this2.state = { value: _this2.getValue() }, _this2.onUpdate = function (newValue, changedBits) { var observedBits = _this2.observedBits | 0; if ((observedBits & changedBits) !== 0) { _this2.setState({ value: _this2.getValue() }); } }, _temp2), _possibleConstructorReturn(_this2, _ret2); } Consumer.prototype.componentWillReceiveProps = function componentWillReceiveProps(nextProps) { var observedBits = nextProps.observedBits; this.observedBits = observedBits === undefined || observedBits === null ? MAX_SIGNED_31_BIT_INT // Subscribe to all changes by default : observedBits; }; Consumer.prototype.componentDidMount = function componentDidMount() { if (this.context[contextProp]) { this.context[contextProp].on(this.onUpdate); } var observedBits = this.props.observedBits; this.observedBits = observedBits === undefined || observedBits === null ? 
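// Usage sketch for createReactContext, the legacy-context shim built in this
// module. `ThemeContext` is hypothetical; the Consumer takes a function child,
// mirroring the React 16.3 context API:
//
//   var ThemeContext = createReactContext('light');
//   <ThemeContext.Provider value="dark">
//     <ThemeContext.Consumer>{theme => <span>{theme}</span>}</ThemeContext.Consumer>
//   </ThemeContext.Provider>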
MAX_SIGNED_31_BIT_INT // Subscribe to all changes by default : observedBits; }; Consumer.prototype.componentWillUnmount = function componentWillUnmount() { if (this.context[contextProp]) { this.context[contextProp].off(this.onUpdate); } }; Consumer.prototype.getValue = function getValue() { if (this.context[contextProp]) { return this.context[contextProp].get(); } else { return defaultValue; } }; Consumer.prototype.render = function render() { return onlyChild(this.props.children)(this.state.value); }; return Consumer; }(_react.Component); Consumer.contextTypes = (_Consumer$contextType = {}, _Consumer$contextType[contextProp] = _propTypes2.default.object, _Consumer$contextType); return { Provider: Provider, Consumer: Consumer }; } exports.default = createReactContext; module.exports = exports['default']; /***/ }), /* 74 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; /* WEBPACK VAR INJECTION */(function(global) {// @flow var key = '__global_unique_id__'; module.exports = function () { return global[key] = (global[key] || 0) + 1; }; /* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(41))) /***/ }), /* 75 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; /** * Copyright (c) 2014-present, Facebook, Inc. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. * */ var emptyFunction = __webpack_require__(76); /** * Similar to invariant but only logs a warning if the condition is not met. * This can be used to log issues in development environments in critical * paths. Removing the logging code for production environments will keep the * same logic and follow the same code paths. */ var warning = emptyFunction; if (false) { var printWarning; } module.exports = warning; /***/ }), /* 76 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; /** * Copyright (c) 2013-present, Facebook, Inc. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. * * */ function makeEmptyFunction(arg) { return function () { return arg; }; } /** * This function accepts and discards inputs; it has no side effects. This is * primarily useful idiomatically for overridable function endpoints which * always need to be callable, since JS lacks a null-call idiom ala Cocoa. */ var emptyFunction = function emptyFunction() {}; emptyFunction.thatReturns = makeEmptyFunction; emptyFunction.thatReturnsFalse = makeEmptyFunction(false); emptyFunction.thatReturnsTrue = makeEmptyFunction(true); emptyFunction.thatReturnsNull = makeEmptyFunction(null); emptyFunction.thatReturnsThis = function () { return this; }; emptyFunction.thatReturnsArgument = function (arg) { return arg; }; module.exports = emptyFunction; /***/ }), /* 77 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = mapContextToProps; var _react = _interopRequireDefault(__webpack_require__(1)); var _forwardRef = _interopRequireDefault(__webpack_require__(46)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
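// The emptyFunction helpers above return canned values; for example:
//
//   emptyFunction.thatReturnsTrue()       // -> true
//   emptyFunction.thatReturnsArgument(42) // -> 42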
obj : { default: obj }; } function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } var getDisplayName = function getDisplayName(Component) { var name = typeof Component === 'string' ? Component : Component.name || Component.displayName; return name ? "ContextTransform(" + name + ")" : 'ContextTransform'; }; var ensureConsumer = function ensureConsumer(c) { return c.Consumer || c; }; function $mapContextToProps(_ref, Component) { var maybeArrayOfConsumers = _ref.consumers, mapToProps = _ref.mapToProps, displayName = _ref.displayName, _ref$forwardRefAs = _ref.forwardRefAs, forwardRefAs = _ref$forwardRefAs === void 0 ? 'ref' : _ref$forwardRefAs; var consumers = maybeArrayOfConsumers; if (!Array.isArray(maybeArrayOfConsumers)) { consumers = [maybeArrayOfConsumers]; } var SingleConsumer = ensureConsumer(consumers[0]); function singleRender(props, ref) { var _extends2; var propsWithRef = _extends((_extends2 = {}, _extends2[forwardRefAs] = ref, _extends2), props); return _react.default.createElement(SingleConsumer, null, function (value) { return _react.default.createElement(Component, _extends({}, propsWithRef, mapToProps(value, props))); }); } function multiRender(props, ref) { var _extends3; var propsWithRef = _extends((_extends3 = {}, _extends3[forwardRefAs] = ref, _extends3), props); return consumers.reduceRight(function (inner, Context) { return function () { for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } var Consumer = ensureConsumer(Context); return _react.default.createElement(Consumer, null, function (value) { return inner.apply(void 0, args.concat([value])); }); }; }, function () { for (var _len2 = arguments.length, contexts = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) { contexts[_key2] = arguments[_key2]; } return _react.default.createElement(Component, _extends({}, propsWithRef, mapToProps.apply(void 0, contexts.concat([props])))); })(); } var contextTransform = consumers.length === 1 ? 
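// Usage sketch for mapContextToProps (assembled just below): it maps one or
// more context values onto a wrapped component's props. `ThemeContext` and
// `Button` are hypothetical:
//
//   var ThemedButton = mapContextToProps(
//     ThemeContext,         // consumer(s)
//     theme => ({ theme }), // mapToProps
//     Button                // wrapped component
//   );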
singleRender : multiRender; return (0, _forwardRef.default)(contextTransform, { displayName: displayName || getDisplayName(Component) }); } function mapContextToProps(maybeOpts, mapToProps, Component) { if (arguments.length === 2) return $mapContextToProps(maybeOpts, mapToProps); return $mapContextToProps({ consumers: maybeOpts, mapToProps: mapToProps }, Component); } /***/ }), /* 78 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = filterEvents; var _contains = _interopRequireDefault(__webpack_require__(22)); var _querySelectorAll = _interopRequireDefault(__webpack_require__(9)); function filterEvents(selector, handler) { return function filterHandler(e) { var top = e.currentTarget, target = e.target, matches = (0, _querySelectorAll.default)(top, selector); if (matches.some(function (match) { return (0, _contains.default)(match, target); })) handler.call(this, e); }; } module.exports = exports["default"]; /***/ }), /* 79 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = activeElement; var _ownerDocument = _interopRequireDefault(__webpack_require__(16)); function activeElement(doc) { if (doc === void 0) { doc = (0, _ownerDocument.default)(); } try { return doc.activeElement; } catch (e) { /* ie throws if no active element */ } } module.exports = exports["default"]; /***/ }), /* 80 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = void 0; var _addClass = _interopRequireDefault(__webpack_require__(81)); exports.addClass = _addClass.default; var _removeClass = _interopRequireDefault(__webpack_require__(82)); exports.removeClass = _removeClass.default; var _hasClass = _interopRequireDefault(__webpack_require__(47)); exports.hasClass = _hasClass.default; var _default = { addClass: _addClass.default, removeClass: _removeClass.default, hasClass: _hasClass.default }; exports.default = _default; /***/ }), /* 81 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; var _interopRequireDefault = __webpack_require__(5); exports.__esModule = true; exports.default = addClass; var _hasClass = _interopRequireDefault(__webpack_require__(47)); function addClass(element, className) { if (element.classList) element.classList.add(className);else if (!(0, _hasClass.default)(element, className)) if (typeof element.className === 'string') element.className = element.className + ' ' + className;else element.setAttribute('class', (element.className && element.className.baseVal || '') + ' ' + className); } module.exports = exports["default"]; /***/ }), /* 82 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; function replaceClassName(origClass, classToRemove) { return origClass.replace(new RegExp('(^|\\s)' + classToRemove + '(?:\\s|$)', 'g'), '$1').replace(/\s+/g, ' ').replace(/^\s*|\s*$/g, ''); } module.exports = function removeClass(element, className) { if (element.classList) element.classList.remove(className);else if (typeof element.className === 'string') element.className = replaceClassName(element.className, className);else element.setAttribute('class', replaceClassName(element.className && element.className.baseVal || '', className)); }; /***/ }), /* 83 */ /***/ (function(module, exports, __webpack_require__) { 
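// Delegation sketch for filterEvents above: the returned handler only fires
// when the event target sits inside a descendant of the listener's element
// that matches the selector. `list` is a hypothetical DOM node:
//
//   list.addEventListener('click', filterEvents('li.item', function (e) {
//     console.log('row clicked');
//   }));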
"use strict"; exports.__esModule = true; exports.default = isOverflowing; var _isWindow = _interopRequireDefault(__webpack_require__(84)); var _ownerDocument = _interopRequireDefault(__webpack_require__(16)); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function isBody(node) { return node && node.tagName.toLowerCase() === 'body'; } function bodyIsOverflowing(node) { var doc = (0, _ownerDocument.default)(node); var win = (0, _isWindow.default)(doc); return doc.body.clientWidth < win.innerWidth; } function isOverflowing(container) { var win = (0, _isWindow.default)(container); return win || isBody(container) ? bodyIsOverflowing(container) : container.scrollHeight > container.clientHeight; } module.exports = exports.default; /***/ }), /* 84 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.default = getWindow; function getWindow(node) { return node === node.window ? node : node.nodeType === 9 ? node.defaultView || node.parentWindow : false; } module.exports = exports["default"]; /***/ }), /* 85 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; exports.__esModule = true; exports.ariaHidden = ariaHidden; exports.hideSiblings = hideSiblings; exports.showSiblings = showSiblings; var BLACKLIST = ['template', 'script', 'style']; var isHidable = function isHidable(_ref) { var nodeType = _ref.nodeType, tagName = _ref.tagName; return nodeType === 1 && BLACKLIST.indexOf(tagName.toLowerCase()) === -1; }; var siblings = function siblings(container, exclude, cb) { exclude = [].concat(exclude); [].forEach.call(container.children, function (node) { if (exclude.indexOf(node) === -1 && isHidable(node)) { cb(node); } }); }; function ariaHidden(show, node) { if (!node) return; if (show) { node.setAttribute('aria-hidden', 'true'); } else { node.removeAttribute('aria-hidden'); } } function hideSiblings(container, _ref2) { var root = _ref2.root, backdrop = _ref2.backdrop; siblings(container, [root, backdrop], function (node) { return ariaHidden(true, node); }); } function showSiblings(container, _ref3) { var root = _ref3.root, backdrop = _ref3.backdrop; siblings(container, [root, backdrop], function (node) { return ariaHidden(false, node); }); } /***/ }), /* 86 */ /***/ (function(module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_require__.r(__webpack_exports__); // CONCATENATED MODULE: ./node_modules/@babel/runtime/helpers/esm/extends.js function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } // CONCATENATED MODULE: ./node_modules/@babel/runtime/helpers/esm/objectWithoutPropertiesLoose.js function _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; } // EXTERNAL MODULE: ./node_modules/classnames/index.js var classnames = __webpack_require__(2); var classnames_default = /*#__PURE__*/__webpack_require__.n(classnames); // EXTERNAL MODULE: external {"root":"React","commonjs2":"react","commonjs":"react","amd":"react"} var 
external_root_React_commonjs2_react_commonjs_react_amd_react_ = __webpack_require__(1); var external_root_React_commonjs2_react_commonjs_react_amd_react_default = /*#__PURE__*/__webpack_require__.n(external_root_React_commonjs2_react_commonjs_react_amd_react_); // EXTERNAL MODULE: ./node_modules/prop-types/index.js var prop_types = __webpack_require__(0); var prop_types_default = /*#__PURE__*/__webpack_require__.n(prop_types); // EXTERNAL MODULE: ./node_modules/uncontrollable/hook.js var hook = __webpack_require__(15); var hook_default = /*#__PURE__*/__webpack_require__.n(hook); // CONCATENATED MODULE: ./node_modules/@babel/runtime/helpers/esm/inheritsLoose.js function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } // EXTERNAL MODULE: ./node_modules/@restart/context/forwardRef.js var forwardRef = __webpack_require__(32); var forwardRef_default = /*#__PURE__*/__webpack_require__.n(forwardRef); // CONCATENATED MODULE: ./src/ThemeProvider.js var _jsxFileName = "/Users/jason/src/react-bootstrap/src/ThemeProvider.js"; var ThemeContext = external_root_React_commonjs2_react_commonjs_react_amd_react_default.a.createContext(new Map()); var Consumer = ThemeContext.Consumer, Provider = ThemeContext.Provider; var ThemeProvider_ThemeProvider = /*#__PURE__*/ function (_React$Component) { _inheritsLoose(ThemeProvider, _React$Component); function ThemeProvider() { var _this; for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } _this = _React$Component.call.apply(_React$Component, [this].concat(args)) || this; _this.prefixes = new Map(); Object.keys(_this.props.prefixes).forEach(function (key) { _this.prefixes.set(key, _this.props.prefixes[key]); }); return _this; } var _proto = ThemeProvider.prototype; _proto.render = function render() { return external_root_React_commonjs2_react_commonjs_react_amd_react_default.a.createElement(Provider, { value: this.prefixes, __source: { fileName: _jsxFileName, lineNumber: 22 }, __self: this }, this.props.children); }; return ThemeProvider; }(external_root_React_commonjs2_react_commonjs_react_amd_react_default.a.Component); ThemeProvider_ThemeProvider.propTypes = { prefixes: prop_types_default.a.object.isRequired }; function useBootstrapPrefix(prefix, defaultPrefix) { var prefixes = Object(external_root_React_commonjs2_react_commonjs_react_amd_react_["useContext"])(ThemeContext); return prefix || prefixes.get(defaultPrefix) || defaultPrefix; } function createBootstrapComponent(Component, opts) { if (typeof opts === 'string') opts = { prefix: opts }; var isClassy = Component.prototype && Component.prototype.isReactComponent; // If it's a functional component make sure we don't break it with a ref var _opts = opts, prefix = _opts.prefix, _opts$forwardRefAs = _opts.forwardRefAs, forwardRefAs = _opts$forwardRefAs === void 0 ? isClassy ? 
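// Sketch for useBootstrapPrefix above: it resolves a component's Bootstrap
// CSS prefix, preferring an explicit prop, then a ThemeProvider override,
// then the built-in default. A hypothetical call inside a component:
//
//   var bsPrefix = useBootstrapPrefix(props.bsPrefix, 'btn');
//   // -> props.bsPrefix, else the themed value registered for 'btn', else 'btn'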
'ref' : 'innerRef' : _opts$forwardRefAs; return forwardRef_default()(function (_ref, ref) { var props = _extends({}, _ref); props[forwardRefAs] = ref; var prefixes = Object(external_root_React_commonjs2_react_commonjs_react_amd_react_["useContext"])(ThemeContext); return external_root_React_commonjs2_react_commonjs_react_amd_react_default.a.createElement(Component, _extends({}, props, { // eslint-disable-next-line react/prop-types bsPrefix: props.bsPrefix || prefixes.get(prefix) || prefix, __source: { fileName: _jsxFileName, lineNumber: 42 }, __self: this })); }, { displayName: "Bootstrap(" + (Component.displayName || Component.name) + ")" }); } /* harmony default export */ var src_ThemeProvider = (ThemeProvider_ThemeProvider); // CONCATENATED MODULE: ./src/SelectableContext.js var SelectableContext = external_root_React_commonjs2_react_commonjs_react_amd_react_default.a.createContext(); var makeEventKey = function makeEventKey(eventKey, href) { if (eventKey != null) return String(eventKey); return href || null; }; /* harmony default export */ var src_SelectableContext = (SelectableContext); // CONCATENATED MODULE: ./src/AccordionToggle.js var AccordionToggle_jsxFileName = "/Users/jason/src/react-bootstrap/src/AccordionToggle.js"; var propTypes = { /** Set a custom element for this component */ as: prop_types_default.a.elementType, /** * A key that corresponds to the collapse component that gets triggered * when this has been clicked. */ eventKey: prop_types_default.a.string.isRequired, /** A callback function for when this component is clicked */ onClick: prop_types_default.a.func, /** Children prop should only contain a single child, and is enforced as such */ children: prop_types_default.a.element }; var AccordionToggle_defaultProps = { as: 'button' }; var AccordionToggle = external_root_React_commonjs2_react_commonjs_react_amd_react_default.a.forwardRef(function (_ref, ref) { var Component = _ref.as, children = _ref.children, eventKey = _ref.eventKey, _onClick = _ref.onClick, props = _objectWithoutPropertiesLoose(_ref, ["as", "children", "eventKey", "onClick"]); var onSelect = Object(external_root_React_commonjs2_react_commonjs_react_amd_react_["useContext"])(src_SelectableContext); return external_root_React_commonjs2_react_commonjs_react_amd_react_default.a.createElement(Component, _extends({ ref: ref, onClick: function onClick(e) { onSelect(eventKey, e); if (_onClick) _onClick(e); } }, props, { __source: { fileName: AccordionToggle_jsxFileName, lineNumber: 31 }, __self: this }), children); }); AccordionToggle.propTypes = propTypes; AccordionToggle.defaultProps = AccordionToggle_defaultProps; /* harmony default export */ var src_AccordionToggle = (AccordionToggle); // EXTERNAL MODULE: ./node_modules/dom-helpers/style/index.js var dom_helpers_style = __webpack_require__(11); var style_default = /*#__PURE__*/__webpack_require__.n(dom_helpers_style); // EXTERNAL MODULE: ./node_modules/dom-helpers/transition/end.js var end = __webpack_require__(23); var end_default = /*#__PURE__*/__webpack_require__.n(end); // EXTERNAL MODULE: ./node_modules/react-transition-group/Transition.js var react_transition_group_Transition = __webpack_require__(12); var Transition_default = /*#__PURE__*/__webpack_require__.n(react_transition_group_Transition); // CONCATENATED MODULE: ./src/utils/triggerBrowserReflow.js // reading a dimension prop will cause the browser to recalculate, // which will let our animations work function triggerBrowserReflow(node) { node.offsetHeight; // eslint-disable-line 
no-unused-expressions } // CONCATENATED MODULE: ./src/utils/createChainedFunction.js /** * Safe chained function * * Will only create a new function if needed, * otherwise will pass back existing functions or null. * * @param {function} functions to chain * @returns {function|null} */ function createChainedFunction() { for (var _len = arguments.length, funcs = new Array(_len), _key = 0; _key < _len; _key++) { funcs[_key] = arguments[_key]; } return funcs.filter(function (f) { return f != null; }).reduce(function (acc, f) { if (typeof f !== 'function') { throw new Error('Invalid Argument Type, must only provide functions, undefined, or null.'); } if (acc === null) return f; return function chainedFunction() { for (var _len2 = arguments.length, args = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) { args[_key2] = arguments[_key2]; } acc.apply(this, args); f.apply(this, args); }; }, null); } /* harmony default export */ var utils_createChainedFunction = (createChainedFunction); // CONCATENATED MODULE: ./src/Collapse.js var _collapseStyles, Collapse_jsxFileName = "/Users/jason/src/react-bootstrap/src/Collapse.js"; var MARGINS = { height: ['marginTop', 'marginBottom'], width: ['marginLeft', 'marginRight'] }; function getDimensionValue(dimension, elem) { var offset = "offset" + dimension[0].toUpperCase() + dimension.slice(1); var value = elem[offset]; var margins = MARGINS[dimension]; return value + parseInt(style_default()(elem, margins[0]), 10) + parseInt(style_default()(elem, margins[1]), 10); } var collapseStyles = (_collapseStyles = {}, _collapseStyles[react_transition_group_Transition["EXITED"]] = 'collapse', _collapseStyles[react_transition_group_Transition["EXITING"]] = 'collapsing', _collapseStyles[react_transition_group_Transition["ENTERING"]] = 'collapsing', _collapseStyles[react_transition_group_Transition["ENTERED"]] = 'collapse show', _collapseStyles); var Collapse_propTypes = { /** * Show the component; triggers the expand or collapse animation */ in: prop_types_default.a.bool, /** * Wait until the first "enter" transition to mount the component (add it to the DOM) */ mountOnEnter: prop_types_default.a.bool, /** * Unmount the component (remove it from the DOM) when it is collapsed */ unmountOnExit: prop_types_default.a.bool, /** * Run the expand animation when the component mounts, if it is initially * shown */ appear: prop_types_default.a.bool, /** * Duration of the collapse animation in milliseconds, to ensure that * finishing callbacks are fired even if the original browser transition end * events are canceled */ timeout: prop_types_default.a.number, /** * Callback fired before the component expands */ onEnter: prop_types_default.a.func, /** * Callback fired after the component starts to expand */ onEntering: prop_types_default.a.func, /** * Callback fired after the component has expanded */ onEntered: prop_types_default.a.func, /** * Callback fired before the component collapses */ onExit: prop_types_default.a.func, /** * Callback fired after the component starts to collapse */ onExiting: prop_types_default.a.func, /** * Callback fired after the component has collapsed */ onExited: prop_types_default.a.func, /** * The dimension used when collapsing, or a function that returns the * dimension * * _Note: Bootstrap only partially supports 'width'! 
/*
 *   ./src/Collapse.js (end): propTypes and defaultProps (in: false,
 *     timeout: 300, dimension: 'height', getDimensionValue), plus the
 *     Collapse class itself. Its enter/exit handlers animate the chosen
 *     dimension (sketch below); render() chains user callbacks through
 *     createChainedFunction, wraps the child in a react-transition-group
 *     <Transition> with dom-helpers' transition-end listener, applies the
 *     state class, and sets aria-expanded only when a `role` is given.
 *   ./src/AccordionContext.js: React.createContext(null) holding the active key.
 *   ./src/AccordionCollapse.js: consumes AccordionContext and renders
 *     <Collapse in={contextEventKey === eventKey}> around its single child.
 *   ./src/Accordion.js: made uncontrollable via the useUncontrolled hook with
 *     { activeKey: 'onSelect' }; provides AccordionContext (activeKey) and
 *     SelectableContext (onSelect) around the rendered element and adds the
 *     'accordion' bsPrefix class.
 *   Externals: prop-types-extra, @restart/hooks/useEventCallback,
 *     dom-helpers/util/camelize.
 *   ./src/utils/createWithBsPrefix.js: factory for trivial components that
 *     only add a bootstrap class (pascal-cased displayName, default tag 'div').
 *   ./src/utils/divWithClassName.js: forwardRef div appending a fixed class.
 *   ./src/Fade.js (begins here, continues in the next span): propTypes and
 *     defaultProps (timeout: 300); fadeStyles maps ENTERING/ENTERED to 'show';
 *     handleEnter triggers a reflow before calling the user's onEnter.
 */
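// A minimal sketch (assuming dimension === 'height') of how the elided
// Collapse class animates: CSS transitions on an explicit pixel height, with
// a forced reflow between the two style writes on exit. Note the bundled
// getDimensionValue additionally adds both vertical margins; this sketch uses
// plain offsetHeight for brevity.
function forceReflow(node) {
  node.offsetHeight; // reading a layout property makes the browser flush styles
}
const collapseHeightHandlers = {
  onEnter(elem) { elem.style.height = '0'; },                         // start closed
  onEntering(elem) { elem.style.height = elem.scrollHeight + 'px'; }, // animate to content height
  onEntered(elem) { elem.style.height = null; },                      // hand control back to CSS
  onExit(elem) {
    elem.style.height = elem.offsetHeight + 'px'; // pin the current height...
    forceReflow(elem);                            // ...and flush it, so the next write animates
  },
  onExiting(elem) { elem.style.height = '0'; },   // animate closed
};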
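// Illustrative usage only (JSX shown in comments; not part of the bundle).
// The two contexts do all the wiring: Accordion publishes activeKey through
// AccordionContext and onSelect through SelectableContext; each Toggle
// reports its eventKey on click, and each Collapse opens when the keys match.
//
//   <Accordion defaultActiveKey="0">
//     <Accordion.Toggle eventKey="0">Section 0</Accordion.Toggle>
//     <Accordion.Collapse eventKey="0">
//       <div>Visible while activeKey === "0"</div>
//     </Accordion.Collapse>
//   </Accordion>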
/*
 *   ./src/Fade.js (end): wraps the child in <Transition> with the
 *     transition-end listener, cloning it with the classes 'fade' plus
 *     fadeStyles[status].
 *   ./src/CloseButton.js: <button type="button" class="close"> containing an
 *     aria-hidden '×' and an sr-only label (default 'Close').
 *   @babel/runtime _assertThisInitialized helper.
 *   ./src/SafeAnchor.js: an anchor-as-button accessibility shim. Trivial
 *     hrefs ('' or '#') get role="button" and href="#"; Space triggers the
 *     click handler; `disabled` is emulated with preventDefault /
 *     stopPropagation, tabIndex -1 and aria-disabled (sketch below).
 *   ./src/Alert.js (begins here, continues in the next span): propTypes are
 *     bsPrefix, variant ('primary' | 'secondary' | 'success' | 'danger' |
 *     'warning' | 'info' | 'dark' | 'light'), dismissible, show
 *     (@controllable onClose), onClose, closeLabel, and transition.
 */
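// A condensed, readable reconstruction of SafeAnchor's event logic elided
// above (the real component is a class whose render applies the role, href,
// tabIndex and aria-disabled attributes described in the summary):
function isTrivialHref(href) {
  return !href || href.trim() === '#'; // no href at all, or a bare '#'
}
function safeAnchorClick(event, { disabled, href, onClick }) {
  if (disabled || isTrivialHref(href)) {
    event.preventDefault(); // never navigate to '#' or while disabled
  }
  if (disabled) {
    event.stopPropagation(); // emulate <button disabled>: swallow the event
    return;
  }
  if (onClick) onClick(event);
}
function safeAnchorKeyDown(event, props) {
  if (event.key === ' ') {    // Space activates, like a native button
    event.preventDefault();
    safeAnchorClick(event, props);
  }
}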
/*
 *   ./src/Alert.js (end): defaultProps { show: true, transition: Fade,
 *     closeLabel: 'Close alert' }; `show` is made controllable against
 *     onClose via the useUncontrolled hook. Renders <div role="alert"> with
 *     'alert', 'alert-<variant>' and 'alert-dismissible' classes, a
 *     CloseButton when dismissible, and wraps everything in the transition
 *     component (unmountOnExit) when one is provided; otherwise it returns
 *     `show ? alert : null`. Also defines Alert.Link ('alert-link' on
 *     SafeAnchor) and Alert.Heading ('alert-heading' on a div styled as h4).
 *   ./src/Badge.js: a <span> with 'badge' plus optional 'badge-pill' and
 *     'badge-<variant>' classes.
 *   ./src/BreadcrumbItem.js: an 'li' by default, rendering either a <span>
 *     with aria-current="page" (when active) or a SafeAnchor carrying
 *     href/title/target.
 *   ./src/Breadcrumb.js (begins here, continues below): propTypes start with
 *     bsPrefix and label (the ARIA label for the nav element, per the
 *     WAI-ARIA breadcrumb pattern); the next prop's doc comment is cut off
 *     at the end of this span.
 */
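// Illustrative usage only (JSX in comments). Alert's `show` prop is
// controllable: either drive it yourself with show + onClose, or, assuming
// the useUncontrolled hook's usual default-prop convention, start it open
// with defaultShow and let the component manage its own state. `dismissible`
// renders the CloseButton, which fires onClose(false, event).
//
//   <Alert variant="warning" dismissible show={visible}
//          onClose={() => setVisible(false)}>
//     Heads up: something needs attention.
//   </Alert>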