Repository: Azure/acr Branch: main Commit: 4477d3db980c Files: 166 Total size: 607.4 KB Directory structure: gitextract_umt5drn5/ ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.md │ │ ├── feature_request.md │ │ └── roadmap-template.yml │ └── workflows/ │ ├── nodejs.yml │ └── stale.yml ├── .gitignore ├── LICENSE.txt ├── README.md ├── SECURITY.md ├── docs/ │ ├── .gitignore │ ├── .vuepress/ │ │ └── config.js │ ├── AAD-OAuth.md │ ├── FAQ.md │ ├── README.md │ ├── Token-BasicAuth.md │ ├── Troubleshooting Guide.md │ ├── acr-roadmap.md │ ├── aks-acr-across-tenants.md │ ├── artifact-media-types.json │ ├── blog/ │ │ ├── abac-repo-permissions.md │ │ ├── connected-registry.md │ │ └── teleport.md │ ├── container-registry-consuming-public-content.md │ ├── container-registry-oras-artifacts.md │ ├── contributing-to-pages.md │ ├── custom-domain/ │ │ ├── README.md │ │ └── deprecated/ │ │ ├── docker-vm-deploy/ │ │ │ ├── azuredeploy.json │ │ │ ├── azuredeploy.parameters.json │ │ │ ├── deploy-nginx-docker.sh │ │ │ ├── deploy.ps1 │ │ │ ├── docker-compose.yml.template │ │ │ ├── nginx.conf.template │ │ │ └── setup-certs.sh │ │ ├── key-vault-setup/ │ │ │ ├── ensure-vault.ps1 │ │ │ └── upload-cert.ps1 │ │ └── registry-setup-deprecated.md │ ├── deploy.sh │ ├── http-headers.md │ ├── image-signing.md │ ├── image-transfer/ │ │ ├── ExportPipelines/ │ │ │ ├── azuredeploy.json │ │ │ └── azuredeploy.parameters.json │ │ ├── ImportPipelines/ │ │ │ ├── azuredeploy.json │ │ │ └── azuredeploy.parameters.json │ │ ├── PipelineRun/ │ │ │ ├── PipelineRun-Export/ │ │ │ │ ├── azuredeploy.json │ │ │ │ └── azuredeploy.parameters.json │ │ │ └── PipelineRun-Import/ │ │ │ ├── azuredeploy.json │ │ │ └── azuredeploy.parameters.json │ │ └── README.md │ ├── integration/ │ │ ├── CircleCI.md │ │ ├── change-analysis/ │ │ │ └── README.md │ │ └── github-actions/ │ │ ├── Dockerfile │ │ ├── github-actions.md │ │ └── main.workflow │ ├── move-repositories-to-new-registry/ │ │ └── README.md │ ├── package.json │ ├── 
preview/ │ │ ├── abac-repo-permissions/ │ │ │ └── README.md │ │ ├── artifact-streaming/ │ │ │ └── README.md │ │ ├── connected-registry/ │ │ │ ├── README.md │ │ │ ├── connected-registry-error-codes.md │ │ │ ├── intro-connected-registry.md │ │ │ ├── overview-connected-registry-access.md │ │ │ ├── overview-connected-registry-and-iot-edge.md │ │ │ ├── quickstart-connected-registry-cli.md │ │ │ ├── quickstart-deploy-connected-registry-iot-edge-cli.md │ │ │ ├── quickstart-deploy-connected-registry-kubernetes-v2.md │ │ │ ├── quickstart-deploy-connected-registry-kubernetes.md │ │ │ ├── quickstart-deploy-connected-registry-nested-iot-edge-cli.md │ │ │ ├── quickstart-pull-images-from-connected-registry.md │ │ │ ├── quickstart-send-connected-registry-events-to-event-grid.md │ │ │ ├── quickstart-view-connected-registry-repos-and-tags.md │ │ │ ├── release-notes.md │ │ │ └── troubleshooting.md │ │ ├── continuous-patching/ │ │ │ └── README.md │ │ ├── quarantine/ │ │ │ ├── quarantine-details/ │ │ │ │ ├── example.json │ │ │ │ └── schema.json │ │ │ └── readme.md │ │ └── regional-endpoints/ │ │ └── regional-endpoints.md │ ├── roles-and-permissions.md │ ├── tasks/ │ │ ├── agentpool/ │ │ │ └── README.md │ │ ├── buildx/ │ │ │ ├── README.md │ │ │ ├── bootstrap.yaml │ │ │ ├── build.yaml │ │ │ ├── build_with_cache.yaml │ │ │ └── build_with_cache_2.yaml │ │ ├── container-registry-tasks-overview.md │ │ ├── container-registry-tasks-walkthrough.md │ │ ├── run-as-deployment/ │ │ │ ├── README.md │ │ │ ├── quickdockerbuild/ │ │ │ │ ├── README.md │ │ │ │ ├── azuredeploy.json │ │ │ │ └── azuredeploy.parameters.json │ │ │ ├── quickdockerbuild-on-existing-registry/ │ │ │ │ ├── README.md │ │ │ │ ├── azuredeploy.json │ │ │ │ └── azuredeploy.parameters.json │ │ │ ├── quickdockerbuildusingidentitykeyvault/ │ │ │ │ ├── README.md │ │ │ │ ├── azuredeploy.json │ │ │ │ └── azuredeploy.parameters.json │ │ │ ├── quickdockerbuildwithidentity/ │ │ │ │ ├── README.md │ │ │ │ ├── azuredeploy.json │ │ │ │ └── 
azuredeploy.parameters.json │ │ │ ├── quickrun/ │ │ │ │ ├── README.md │ │ │ │ ├── azuredeploy.json │ │ │ │ └── azuredeploy.parameters.json │ │ │ └── taskrun/ │ │ │ ├── README.md │ │ │ ├── azuredeploy.json │ │ │ └── azuredeploy.parameters.json │ │ └── triggers/ │ │ └── private-base-image-update.md │ └── teleport/ │ ├── README.md │ ├── aks-getting-started.md │ ├── aks-teleport-comparison.md │ ├── check-expansion.sh │ ├── collecting-teleportd-logs-aks.md │ ├── edit-teleport-attribute.sh │ ├── find-teleport-enabled-repositories.sh │ ├── samples/ │ │ ├── azure-vote-shuttle.yaml │ │ └── azure-vote-teleport.yaml │ └── teleport-repository-management.md ├── notifications/ │ ├── README.md │ └── helm-repo-failure-20200918-.md └── samples/ ├── dotnetcore/ │ ├── image-transfer/ │ │ ├── ContainerRegistryTransfer/ │ │ │ ├── Clients/ │ │ │ │ ├── ExportClient.cs │ │ │ │ └── ImportClient.cs │ │ │ ├── ContainerRegistryTransfer.csproj │ │ │ ├── Helpers/ │ │ │ │ ├── AzureHelper.cs │ │ │ │ ├── IdentityHelper.cs │ │ │ │ └── KeyVaultHelper.cs │ │ │ ├── Models/ │ │ │ │ ├── Options.cs │ │ │ │ ├── PipelineConfig.cs │ │ │ │ └── PipelineRunConfig.cs │ │ │ ├── Program.cs │ │ │ └── appsettings.json │ │ ├── ContainerRegistryTransfer.sln │ │ └── README.md │ └── registry-artifact-transfer/ │ ├── README.md │ └── src/ │ ├── Configurations/ │ │ ├── AzureEnvironmentConfiguration.cs │ │ ├── ExportConfiguration.cs │ │ ├── IdentityConfiguration.cs │ │ ├── ImportConfiguration.cs │ │ ├── RegistryConfiguration.cs │ │ ├── SourceRegistryConfiguration.cs │ │ └── TransferDefinition.cs │ ├── Program.cs │ ├── Registry.cs │ ├── RegistryArtifactTransfer.csproj │ ├── RepositoryProvider/ │ │ ├── CatalogApiResponse.cs │ │ ├── HttpMessageExtensions.cs │ │ ├── RepositoryProviderV2.cs │ │ └── TagListApiResponse.cs │ ├── ResourceId.cs │ ├── TaskExtensions.cs │ ├── Transfer/ │ │ ├── ArtifactProvider.cs │ │ ├── BlobCopier.cs │ │ ├── ExportJob.cs │ │ ├── ExportWorker.cs │ │ ├── ImportJob.cs │ │ ├── ImportWorker.cs │ │ ├── 
TransferClient.cs │ │ └── TransferJobStatus.cs │ ├── TransferReport.cs │ ├── TransferResult.cs │ └── transferdefinition.json └── java/ └── task/ ├── .factorypath ├── .gitignore ├── Dockerfile ├── README.md ├── acb.yaml ├── pom.xml └── src/ └── main/ └── java/ └── com/ └── microsoft/ └── azure/ └── management/ └── containerregistry/ └── samples/ └── ManageTask.java ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug report about: Create a report to help us improve title: '' labels: bug assignees: '' --- **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: 1. 2. 3. **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Any relevant environment information** - OS: [e.g. Ubuntu, Windows] - Azure CLI/PowerShell/SDK version - Docker version - Datetime (UTC) when the issue occurred - Registry and image names **Additional context** Add any other context about the problem here. If any information is a concern to post here, you can create a [support ticket](https://azure.microsoft.com/en-us/support/create-ticket/) or send an email to acrsup@microsoft.com. ================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: Feature request about: Suggest an idea for the Azure Container Registry title: '' labels: feature-request assignees: '' --- **What is the problem you're trying to solve** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. 
**Additional context** Add any other context or screenshots about the feature request here. ================================================ FILE: .github/ISSUE_TEMPLATE/roadmap-template.yml ================================================ name: Roadmap Feature Request description: This template is primarily used by the product group to manage roadmap features in the areas of cloud-native security, registries such as Azure Container Registry (ACR) and Microsoft Artifact Registry (MAR/MCR), open-source integration, and more. labels: [feature-request, roadmap] body: - type: markdown attributes: value: | A roadmap feature could include areas such as cloud-native security, registries like Azure Container Registry (ACR) and Microsoft Artifact Registry (MAR/MCR), open-source integration, and more. - type: textarea id: problem validations: required: true attributes: label: "Motivation" description: "A clear and concise description of the motivation behind this feature request. Why is this feature important? What problem does it solve or what opportunity does it create?" - type: textarea id: solution validations: required: true attributes: label: "Description" description: "A clear and concise description of the feature request. What is the feature about? Include any relevant details, success criteria or specifications." - type: textarea id: context validations: required: false attributes: label: "Additional context" description: "Add any additional context about the roadmap feature." 
================================================ FILE: .github/workflows/nodejs.yml ================================================ name: GH-Page Publish on: push: branches: - main - test-pages jobs: build: runs-on: ubuntu-latest strategy: matrix: node-version: [10.x] steps: - uses: actions/checkout@v1 - name: Use Node.js ${{ matrix.node-version }} uses: actions/setup-node@v1 with: node-version: ${{ matrix.node-version }} - name: Deploy Pages run: | eval "$(ssh-agent -s)" ssh-add - <<< "${DEPLOY_KEY}" cd docs ./deploy.sh env: GH_REPOSITORY : ${{ github.repository }} GIT_SSH_COMMAND: "ssh -o StrictHostKeyChecking=no" DEPLOY_KEY: ${{ secrets.DEPLOY_KEY }} SSH_AUTH_SOCK: /tmp/ssh_agent.sock CI: true ================================================ FILE: .github/workflows/stale.yml ================================================ name: "Close stale issues and PRs" on: schedule: - cron: "30 1 * * *" permissions: issues: write pull-requests: write jobs: stale: runs-on: ubuntu-latest steps: - uses: actions/stale@v8 with: stale-issue-message: "This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 30 days." stale-pr-message: "This PR is stale because it has been open 45 days with no activity. Remove stale label or comment or this will be closed in 30 days." close-issue-message: "This issue was closed because it has been stalled for 30 days with no activity." close-pr-message: "This PR was closed because it has been stalled for 30 days with no activity." days-before-issue-stale: 60 days-before-pr-stale: 45 days-before-issue-close: 30 days-before-pr-close: 30 exempt-all-milestones: true exempt-issue-labels: 'feature-request' ================================================ FILE: .gitignore ================================================ ## Ignore Visual Studio temporary files, build results, and ## files generated by popular Visual Studio add-ons. 
## ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore # User-specific files *.rsuser *.suo *.user *.userosscache *.sln.docstates # User-specific files (MonoDevelop/Xamarin Studio) *.userprefs # Mono auto generated files mono_crash.* # Build results [Dd]ebug/ [Dd]ebugPublic/ [Rr]elease/ [Rr]eleases/ x64/ x86/ [Ww][Ii][Nn]32/ [Aa][Rr][Mm]/ [Aa][Rr][Mm]64/ bld/ [Bb]in/ [Oo]bj/ [Ll]og/ [Ll]ogs/ # Visual Studio 2015/2017 cache/options directory .vs/ # Uncomment if you have tasks that create the project's static files in wwwroot #wwwroot/ # Visual Studio 2017 auto generated files Generated\ Files/ # MSTest test Results [Tt]est[Rr]esult*/ [Bb]uild[Ll]og.* # NUnit *.VisualState.xml TestResult.xml nunit-*.xml # Build Results of an ATL Project [Dd]ebugPS/ [Rr]eleasePS/ dlldata.c # Benchmark Results BenchmarkDotNet.Artifacts/ # .NET Core project.lock.json project.fragment.lock.json artifacts/ # ASP.NET Scaffolding ScaffoldingReadMe.txt # StyleCop StyleCopReport.xml # Files built by Visual Studio *_i.c *_p.c *_h.h *.ilk *.meta *.obj *.iobj *.pch *.pdb *.ipdb *.pgc *.pgd *.rsp *.sbr *.tlb *.tli *.tlh *.tmp *.tmp_proj *_wpftmp.csproj *.log *.vspscc *.vssscc .builds *.pidb *.svclog *.scc # Chutzpah Test files _Chutzpah* # Visual C++ cache files ipch/ *.aps *.ncb *.opendb *.opensdf *.sdf *.cachefile *.VC.db *.VC.VC.opendb # Visual Studio profiler *.psess *.vsp *.vspx *.sap # Visual Studio Trace Files *.e2e # TFS 2012 Local Workspace $tf/ # Guidance Automation Toolkit *.gpState # ReSharper is a .NET coding add-in _ReSharper*/ *.[Rr]e[Ss]harper *.DotSettings.user # TeamCity is a build add-in _TeamCity* # DotCover is a Code Coverage Tool *.dotCover # AxoCover is a Code Coverage Tool .axoCover/* !.axoCover/settings.json # Coverlet is a free, cross platform Code Coverage Tool coverage*.json coverage*.xml coverage*.info # Visual Studio code coverage results *.coverage *.coveragexml # NCrunch _NCrunch_* .*crunch*.local.xml nCrunchTemp_* # 
MightyMoose *.mm.* AutoTest.Net/ # Web workbench (sass) .sass-cache/ # Installshield output folder [Ee]xpress/ # DocProject is a documentation generator add-in DocProject/buildhelp/ DocProject/Help/*.HxT DocProject/Help/*.HxC DocProject/Help/*.hhc DocProject/Help/*.hhk DocProject/Help/*.hhp DocProject/Help/Html2 DocProject/Help/html # Click-Once directory publish/ # Publish Web Output *.[Pp]ublish.xml *.azurePubxml # Note: Comment the next line if you want to checkin your web deploy settings, # but database connection strings (with potential passwords) will be unencrypted *.pubxml *.publishproj # Microsoft Azure Web App publish settings. Comment the next line if you want to # checkin your Azure Web App publish settings, but sensitive information contained # in these scripts will be unencrypted PublishScripts/ # NuGet Packages *.nupkg # NuGet Symbol Packages *.snupkg # The packages folder can be ignored because of Package Restore **/[Pp]ackages/* # except build/, which is used as an MSBuild target. 
!**/[Pp]ackages/build/ # Uncomment if necessary however generally it will be regenerated when needed #!**/[Pp]ackages/repositories.config # NuGet v3's project.json files produces more ignorable files *.nuget.props *.nuget.targets # Microsoft Azure Build Output csx/ *.build.csdef # Microsoft Azure Emulator ecf/ rcf/ # Windows Store app package directories and files AppPackages/ BundleArtifacts/ Package.StoreAssociation.xml _pkginfo.txt *.appx *.appxbundle *.appxupload # Visual Studio cache files # files ending in .cache can be ignored *.[Cc]ache # but keep track of directories ending in .cache !?*.[Cc]ache/ # Others ClientBin/ ~$* *~ *.dbmdl *.dbproj.schemaview *.jfm *.pfx *.publishsettings orleans.codegen.cs # Including strong name files can present a security risk # (https://github.com/github/gitignore/pull/2483#issue-259490424) #*.snk # Since there are multiple workflows, uncomment next line to ignore bower_components # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) #bower_components/ # RIA/Silverlight projects Generated_Code/ # Backup & report files from converting an old project file # to a newer Visual Studio version. Backup files are not needed, # because we have git ;-) _UpgradeReport_Files/ Backup*/ UpgradeLog*.XML UpgradeLog*.htm ServiceFabricBackup/ *.rptproj.bak # SQL Server files *.mdf *.ldf *.ndf # Business Intelligence projects *.rdl.data *.bim.layout *.bim_*.settings *.rptproj.rsuser *- [Bb]ackup.rdl *- [Bb]ackup ([0-9]).rdl *- [Bb]ackup ([0-9][0-9]).rdl # Microsoft Fakes FakesAssemblies/ # GhostDoc plugin setting file *.GhostDoc.xml # Node.js Tools for Visual Studio .ntvs_analysis.dat node_modules/ # Visual Studio 6 build log *.plg # Visual Studio 6 workspace options file *.opt # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 
*.vbw # Visual Studio LightSwitch build output **/*.HTMLClient/GeneratedArtifacts **/*.DesktopClient/GeneratedArtifacts **/*.DesktopClient/ModelManifest.xml **/*.Server/GeneratedArtifacts **/*.Server/ModelManifest.xml _Pvt_Extensions # Paket dependency manager .paket/paket.exe paket-files/ # FAKE - F# Make .fake/ # CodeRush personal settings .cr/personal # Python Tools for Visual Studio (PTVS) __pycache__/ *.pyc # Cake - Uncomment if you are using it # tools/** # !tools/packages.config # Tabs Studio *.tss # Telerik's JustMock configuration file *.jmconfig # BizTalk build output *.btp.cs *.btm.cs *.odx.cs *.xsd.cs # OpenCover UI analysis results OpenCover/ # Azure Stream Analytics local run output ASALocalRun/ # MSBuild Binary and Structured Log *.binlog # NVidia Nsight GPU debugger configuration file *.nvuser # MFractors (Xamarin productivity tool) working folder .mfractor/ # Local History for Visual Studio .localhistory/ # BeatPulse healthcheck temp database healthchecksdb # Backup folder for Package Reference Convert tool in Visual Studio 2017 MigrationBackup/ # Ionide (cross platform F# VS Code tools) working folder .ionide/ # Fody - auto-generated XML schema FodyWeavers.xsd .vscode/ .DS_Store ================================================ FILE: LICENSE.txt ================================================ Azure Container Registry Samples and Support Copyright (c) Microsoft Corporation All rights reserved. 
MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.md ================================================ # Azure Container Registry This repo contains [issues](https://github.com/Azure/acr/issues), [samples](./docs), [troubleshooting tips](./docs/Troubleshooting%20Guide.md), and a collection of links for Azure Container Registry.
## Blog posts * [Choosing a Docker Container Registry](https://stevelasker.blog/2018/11/14/choosing-a-docker-container-registry/) * [Key Differences between VM and Container Vulnerability Scanning](https://stevelasker.blog/2018/06/27/key-differences-between-vm-and-container-vulnerability-scanning/) * [Working with Geo-replication notifications](https://stevelasker.blog/2018/01/29/working-with-acr-geo-replication-notifications/) * [User Accounts](https://stevelasker.blog/2016/11/17/azure-container-registry-user-accounts/) * [Docker Tagging Best Practices](https://stevelasker.blog/2018/03/01/docker-tagging-best-practices-for-tagging-and-versioning-docker-images/) * [Deploying Docker Images to Azure Container Instances](https://stevelasker.blog/2017/07/28/deploying-docker-images-from-the-azure-container-registry-to-azure-container-instances/) ## Links A set of short links for presentations & social media. ### General ACR Links | Title | Link | | - | - | | [ACR service](https://aka.ms/acr) | https://aka.ms/acr | | [Tiers](https://aka.ms/acr/tiers) | https://aka.ms/acr/tiers | | [Pricing](https://aka.ms/acr/pricing) | https://aka.ms/acr/pricing | | [Docs](https://aka.ms/acr/docs) | https://aka.ms/acr/docs | | [CLI docs](https://aka.ms/acr/docs/cli) | https://aka.ms/acr/docs/cli | | [REST docs](https://aka.ms/acr/docs/rest) | https://aka.ms/acr/docs/rest | | [Roadmap](https://aka.ms/acr/roadmap) | https://aka.ms/acr/roadmap | ### General ACR Capabilities | Title | Link | | - | - | | [Cross region replication](https://aka.ms/acr/geo-replication) | https://aka.ms/acr/geo-replication | | [In-zone redundancy](https://aka.ms/acr/az) | https://aka.ms/acr/az | | [Helm support](https://aka.ms/acr/helm) | https://aka.ms/acr/helm | | [Supply chain artifact support](https://aka.ms/acr/acr/supply-chain-artifacts) | https://aka.ms/acr/acr/supply-chain-artifacts | | [Importing artifacts](https://aka.ms/acr/import) | https://aka.ms/acr/import | | [Tag 
locking](https://aka.ms/acr/tag-locking) | https://aka.ms/acr/tag-locking | | [Webhook notifications](https://aka.ms/acr/webhooks) | https://aka.ms/acr/webhooks | | [Auto-purge](https://aka.ms/acr/auto-purge) | https://aka.ms/acr/auto-purge | | [OCI artifacts](https://aka.ms/acr/artifacts) | https://aka.ms/acr/artifacts | | [Artifact streaming](https://aka.ms/acr/artifact-streaming) | [https://aka.ms/acr/artifact-streaming](https://aka.ms/acr/artifact-streaming) | ### Diagnostic & Troubleshooting Links | Title | Link | | - | - | | [Audit logs](https://aka.ms/acr/audit-logs) | https://aka.ms/acr/audit-logs | | [Health check CLI](https://aka.ms/acr/health-check) | https://aka.ms/acr/health-check | ### Security Links | Title | Link | | - | - | | [Authentication](https://aka.ms/acr/authentication) | https://aka.ms/acr/authentication | | [OAuth Authentication](https://aka.ms/acr/auth/oauth) | https://aka.ms/acr/auth/oauth | | [Authorization](https://aka.ms/acr/authorization) | https://aka.ms/acr/authorization | | [Authorization roles and role assignments](https://aka.ms/acr/authentication/roles) | https://aka.ms/acr/authentication/roles | | [Microsoft Entra-based repository permissions](https://aka.ms/acr/auth/abac) | https://aka.ms/acr/auth/abac | | [Azure policies](https://aka.ms/acr/azurepolicy) | https://aka.ms/acr/azurepolicy | | [VNet & firewall rules](https://aka.ms/acr/vnet) | https://aka.ms/acr/vnet | | [Azure private link](https://aka.ms/acr/privatelink) | https://aka.ms/acr/privatelink | | [Dedicated data endpoints](http://aka.ms/acr/dedicated-data-endpoints) | http://aka.ms/acr/dedicated-data-endpoints | | [Customer-managed keys](https://aka.ms/acr/cmk) | https://aka.ms/acr/cmk | | [Content trust / signing](https://aka.ms/acr/content-trust) | https://aka.ms/acr/content-trust | | [Docker content trust Deprecation](https://aka.ms/acr/dctdeprecation) | https://aka.ms/acr/dctdeprecation | | [Quarantine pattern](https://aka.ms/acr/quarantine) | 
https://aka.ms/acr/quarantine | | [Custom domains (Preview)](https://aka.ms/acr/custom-domains) | https://aka.ms/acr/custom-domains | | [Continuous patching (Preview)](https://aka.ms/acr/patching) | https://aka.ms/acr/patching | ### ACR Tasks | Title | Link | | - | - | | [Tasks](https://aka.ms/acr/tasks) | https://aka.ms/acr/tasks | | [Tasks - Gated Import of Public Content](https://aka.ms/acr/tasks/gated-import) | https://aka.ms/acr/tasks/gated-import | | [Task Scheduling](https://aka.ms/acr/tasks/scheduling) | https://aka.ms/acr/tasks/scheduling | | [Task Timer Cron Expressions](https://aka.ms/acr/tasks/cron) | https://aka.ms/acr/tasks/cron | | [Task Dedicated Agent Pool](https://aka.ms/acr/tasks/agentpool) | https://aka.ms/acr/tasks/agentpool | ## Social Media, Content & ACR Jobs at Microsoft | Title | Link | |-|-| | [Links](https://aka.ms/acr/links) | https://aka.ms/acr/links | | [FAQ](https://aka.ms/acr/faq) | https://aka.ms/acr/faq | | [Presentations](https://aka.ms/acr/presentations) | https://aka.ms/acr/presentations | | [Jobs](https://aka.ms/acr/jobs) | https://aka.ms/acr/jobs | | X #AzureContainerRegistry | https://twitter.com/search?q=%23AzureContainerRegistry | ## Providing feedback | Title | Link | |-|-| | [**Stack Overflow** for community support](https://aka.ms/acr/stack-overflow) | https://aka.ms/acr/stack-overflow | | [**Azure Feedback** for feature requests](https://aka.ms/acr/uservoice) | https://aka.ms/acr/uservoice | | [**GitHub** for logging issues](https://aka.ms/acr/issues) | https://aka.ms/acr/issues | | [**Create a ticket** for general support](https://aka.ms/acr/support/create-ticket) | https://aka.ms/acr/support/create-ticket | ## API and SDK reference * [REST API Reference](https://docs.microsoft.com/rest/api/containerregistry/) * [Swagger Specification](https://github.com/Azure/azure-rest-api-specs/blob/master/specification/containerregistry/resource-manager/Microsoft.ContainerRegistry/stable/2017-10-01/containerregistry.json) * [SDK 
for Python](https://pypi.python.org/pypi/azure-mgmt-containerregistry) * [SDK for Python-Source](https://github.com/Azure/azure-sdk-for-python/tree/master/azure-mgmt-containerregistry) * [SDK for .NET](https://www.nuget.org/packages/Microsoft.Azure.Management.ContainerRegistry) * [SDK for .NET-Source](https://github.com/Azure/azure-sdk-for-net/tree/master/src/SDKs/ContainerRegistry) ================================================ FILE: SECURITY.md ================================================ ## Security Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. ## Reporting Security Issues **Please do not report security vulnerabilities through public GitHub issues.** Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. 
Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) * Full paths of source file(s) related to the manifestation of the issue * The location of the affected source code (tag/branch/commit or direct URL) * Any special configuration required to reproduce the issue * Step-by-step instructions to reproduce the issue * Proof-of-concept or exploit code (if possible) * Impact of the issue, including how an attacker might exploit the issue This information will help us triage your report more quickly. If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. ## Preferred Languages We prefer all communications to be in English. ## Policy Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
================================================ FILE: docs/.gitignore ================================================ gh-pages/ .vuepress/dist/ node_modules ================================================ FILE: docs/.vuepress/config.js ================================================ const currentDateUTC = new Date().toUTCString() module.exports = { title: 'Azure Container Registry', dest: './gh-pages', base: '/acr/', markdown: { lineNumbers: true }, themeConfig: { domain: 'http://azure.github.com', displayAllHeaders: true, sidebar: 'auto', docsDir : 'docs', searchMaxSuggestions: 10, repo: 'azure/acr', repoLabel: 'Star this Repo', editLinks: true, editLinkText: 'Edit this page on GitHub', logo: '/files/acr.svg', sidebar: [ "/", { title: 'Teleport', collapsable: true, children: ['/blog/teleport'], }, { title: 'Tasks', collapsible: true, children: ['/tasks/container-registry-tasks-overview', '/tasks/run-as-deployment/', '/tasks/agentpool/'] }, { title: 'Authentication', collapsable: true, sidebarDepth : 1, children : ['AAD-OAuth', 'Token-BasicAuth'] }, { title: 'Integration', collapsable: true, sidebarDepth : 1, children : ['/integration/change-analysis/'] } ] } } ================================================ FILE: docs/AAD-OAuth.md ================================================ --- type: post title: "AAD Integration" --- # Azure Container Registry integration with Azure Active Directory - [Azure Container Registry integration with Azure Active Directory](#azure-container-registry-integration-with-azure-active-directory) - [Overview](#overview) - [Authenticating to a registry with Azure CLI](#authenticating-to-a-registry-with-azure-cli) - [Listing a repository with Azure CLI](#listing-a-repository-with-azure-cli) - [Azure Container Registry token claim sets](#azure-container-registry-token-claim-sets) - [Getting credentials programmatically](#getting-credentials-programmatically) - [Calling `POST /oauth2/exchange` to get an ACR refresh 
token](#calling-post-oauth2exchange-to-get-an-acr-refresh-token) - [Authenticating docker with an ACR refresh token](#authenticating-docker-with-an-acr-refresh-token) - [Calling `POST /oauth2/token` to get an ACR access token](#calling-post-oauth2token-to-get-an-acr-access-token) - [Calling `POST /oauth2/token` to get an ACR access token for Helm repository](#calling-post-oauth2token-to-get-an-acr-access-token-for-helm-repository) - [Calling an Azure Container Registry API](#calling-an-azure-container-registry-api) - [Catalog Listing](#catalog-listing) - [Pagination](#pagination) - [Tag Listing](#tag-listing) - [Pagination](#pagination-1) - [Samples API Call scripts](#samples-api-call-scripts) - [Catalog Listing with AAD refresh token](#catalog-listing-with-aad-refresh-token) - [Catalog listing using SP/Admin with Basic Auth](#catalog-listing-using-spadmin-with-basic-auth) - [Catalog listing using Admin Keys with Bearer Auth](#catalog-listing-using-admin-keys-with-bearer-auth) - [Docker login with ACR Access Token - Single repository scope](#docker-login-with-acr-access-token---single-repository-scope) - [Fetch helm index.yaml with Admin Keys or SP with Basic Auth](#fetch-helm-indexyaml-with-admin-keys-or-sp-with-basic-auth) ## Overview The Azure Container Registry allows users to manage a private Docker registry on the cloud. Our service enables customers to store and manage container images across all types of Azure deployments, keep container images near deployments to reduce latency and costs, maintain Windows and Linux container images in a single Docker registry, use familiar, open-source Docker command line interface (CLI) tools, and simplify registry access management with Azure Active Directory. The integration of Azure Container Registry with Azure Active Directory is crucial in order to enable transparent authentication and authorization of users and headless services using AAD credentials. 
In this scenario, a user will only have to use their AAD credentials to log-in to their private registry, and the Azure Container Service will take care of the authorization validation of each operation using the provided credentials. Under the hood Azure Container Service utilizes the [oauth2](https://oauth.net/2/) authorization protocol, as described by the [Docker Registry v2 authentication via central service documentation](https://docs.docker.com/registry/spec/auth/token/) as well as the [Docker Registry v2 Bearer token specification](https://docs.docker.com/registry/spec/auth/jwt/). The JWT tokens generated by the Azure Container Registry are easy to observe in [jwt.ms](https://jwt.ms/). ## Authenticating to a registry with Azure CLI The process to log in to the registry, from the user's perspective, is simple. The user will use the Microsoft Azure CLI 2.0: ```bash az acr login -n contosoregistry ``` Internally, the CLI will follow these steps: 1. Calls to Azure Resource Manager to resolve the login server for the specified registry. 2. Obtains refresh credentials from the profile in use. For a headless call, this will give you the registered SPN, for a regular user this will give you a refresh token. 3. Makes an HTTPS GET call to the registry server's `/v2` endpoint, without credentials. A bearer token authentication challenge is expected, specifying realm and service values. The realm contains the authentication server's URL. 4. Makes an HTTPS POST call to the authentication server's `POST /oauth2/exchange` endpoint, with a body indicating the grant type, the service, the tenant, and the credentials. 5. From the server's response, we extract an Azure Container Registry refresh token. 6. Pass the refresh token as the password to the Docker CLI, using a null GUID as the username and calling `docker login`. From here on, the docker CLI takes care of the authorization cycle using oauth2. 
At the end Docker will store the refresh token and go through the oauth2 flow on each operation it does against the Azure Container Registry. ## Listing a repository with Azure CLI The Microsoft Azure CLI 2.0 allows users to also list the repositories registries, and list tags for a repository in a registry. Here's how users can achieve listing the repositories in a registry: ``` az acr repository list -n contosoregistry ``` Internally, the CLI will follow these steps: 1. Calls to Azure Resource Manager to resolve the login server for the specified registry. 2. Obtains refresh credentials from the profile in use. For a headless call, this will give you the registered SPN, for a regular user this will give you a refresh token. 3. Makes an HTTPS GET call to the registry server's `/v2` endpoint, without credentials. A bearer token authentication challenge is expected, specifying realm and service values. The realm contains the authentication server's URL. 4. Makes an HTTPS POST call to the authentication server's `POST /oauth2/exchange` endpoint, with a body indicating the grant type, the service, the tenant, and the credentials. 5. From the server's response we extract an Azure Container Registry refresh token. 6. Makes an HTTPS POST call to the authentication server's `POST /oauth2/token` endpoint, with a body indicating the grant type, the service, the scope, and the Azure Container Registry refresh token. 7. From the server's response we extract an Azure Container Registry access token. 8. Makes an HTTPS GET call to the registry server's `GET /v2/_catalog` endpoint using the access token as the bearer token. 9. Obtains the data from the service and displays it. When listing the tags of a repository, every step above is the same except for the call to the endpoint that gives the tags which is `GET /v2/contosoregistry/tags/list` instead of `GET /v2/_catalog`. 
# Azure Container Registry token claim sets Following the command of repository list in the previous section: ```bash az acr repository list -n contosoregistry ``` A JWT refresh token extracted at step 5 has the following claim set: ```json { "jti": "365e3b5b-844e-4a21-a38c-4d8aebdd6a06", "sub": "user@contoso.com", "nbf": 1497988712, "exp": 1497990801, "iat": 1497988712, "iss": "Azure Container Registry", "aud": "contosoregistry.azurecr.io", "version": "1.0", "grant_type": "refresh_token", "tenant": "409520d4-8100-4d1d-ad47-72432ddcc120", "permissions": { "actions": [ "*" ], "notActions": [] }, "roles": [] } ``` Followed by an access token at step 7 with the following claim set: ```json { "jti": "ec425c1e-7eda-4f70-adb5-19f927e34a41", "sub": "user@contoso.com", "nbf": 1497988907, "exp": 1497993407, "iat": 1497988907, "iss": "Azure Container Registry", "aud": "contosoregistry.azurecr.io", "version": "1.0", "access": [ { "type": "registry", "name": "catalog", "actions": [ "*" ] } ], "roles": [], "grant_type": "access_token" } ``` # Getting credentials programmatically In order to sign in to a container you'll need to exchange AAD credentials for ACR credentials. The accepted form of credential exchange are: - AAD access token. - [Deprecated] AAD refresh token. - [Deprecated] AAD access token and refresh token. The AAD access token is used to talk to the Azure Resource Manager and query for the set of permissions that the user has for the container registry resource. [Deprecated] The AAD refresh token is used in two ways: 1. If no AAD access token was presented, the AAD refresh token is used to obtain an AAD access token. 2. The AAD refresh token is sent back to the user so they can initiate a token refresh cycle against AAD. If no AAD refresh token is sent, then the client won't have this credential at hand to initiate a credential refresh. The cycle to get credentials looks as follows: 1. 
Call `POST /oauth2/exchange` presenting the AAD access token or the AAD refresh token [Deprecated]. The service will return you an ACR refresh token. 2. Call `POST /oauth2/token` presenting the ACR refresh token. The service will return you an ACR access token which you can use to call the Azure Container Registry's APIs. ## Calling `POST /oauth2/exchange` to get an ACR refresh token In this example, we'll try to obtain an ACR refresh token from existing AAD tokens. Assume you have the following: 1. A valid container registry, which here we'll call `contosoregistry.azurecr.io`. 2. The AAD tenant identifier associated to the credentials, which here we'll take to be `409520d4-8100-4d1d-ad47-72432ddcc120`. 3. Valid AAD access token credential with access to the aforementioned container registry. The AAD access token can be obtained from the Azure CLI. After running `az login` check file `$HOME/.azure/msal_token_cache.json` (`%HOMEDRIVE%%HOMEPATH%\.azure\msal_token_cache.json` in Windows) for the token values. Alternatively, run `az account get-access-token --subscription ""` to find the AAD access token. By default, the returned access token is for Azure Resource Manager (ARM). To obtain an AAD access token for Azure Container Registry (ACR), run `az account get-access-token --resource=https://containerregistry.azure.net`. We'll now call `POST /oauth2/exchange` to exchange the AAD tokens for an ACR refresh token. 
Here's how such a call looks when done via `curl`: ```bash registry="contosoregistry.azurecr.io" tenant="409520d4-8100-4d1d-ad47-72432ddcc120" aad_access_token="eyJ...H-g" curl -v -X POST -H "Content-Type: application/x-www-form-urlencoded" -d \ "grant_type=access_token&service=$registry&tenant=$tenant&access_token=$aad_access_token" \ https://$registry/oauth2/exchange ``` The body of the POST message is a querystring-like text that specifies the following values: - `grant_type`, which can take a value of `access_token`, or `access_token_refresh_token` [Deprecated], or `refresh_token` [Deprecated]. - `service`, which must indicate the name of your Azure container registry. - `tenant`, which is the AAD tenant associated to the AAD credentials. - `access_token`, the AAD access token, mandatory when `grant_type` is `access_token` or `access_token_refresh_token` [Deprecated]. - [Deprecated] `refresh_token`, the AAD refresh token, mandatory when `grant_type` is `access_token_refresh_token` or `refresh_token`. The outcome of this operation will be a response with status 200 OK and a body with the following JSON payload: ```json {"refresh_token":"eyJ...L7a"} ``` This response is the ACR refresh token which you can inspect with [jwt.ms](https://jwt.ms/). You can now use it to obtain an ACR access token programmatically or simply send it to the `docker login` command to get docker talking to the Azure Container Registry. ## Authenticating docker with an ACR refresh token Once you have obtained an ACR refresh token, you can use the docker CLI to sign in to your registry like this: ```bash registry="contosoregistry.azurecr.io" acr_username="00000000-0000-0000-0000-000000000000" acr_refresh_token="eyJ...L7a" docker login -u "$acr_username" -p "$acr_refresh_token" $registry ``` The null GUID tells the container registry that this is an ACR refresh token during the login flow. 
Once the authentication succeeds you can talk to the Azure Container Registry with commands like `docker pull` and `docker push`. For example: ```bash docker pull contosoregistry.azurecr.io/contoso-marketing ``` Notice that the ACR refresh token will be saved by the docker CLI in its credential store, and will be used by the docker CLI to obtain an ACR access token on each operation it performs against the Azure Container Registry. The ACR refresh token is made so it stops working after a period of time, but if you obtained it using either `grant_type=access_token_refresh_token` or `grant_type=refresh_token` then it can be refreshed automatically by installing the [ACR docker credential helper](https://github.com/azure/acr-docker-credential-helper). ## Calling `POST /oauth2/token` to get an ACR access token In this example, we'll try to obtain an ACR access token from existing ACR refresh token, and this access token will only work for the operation we're trying to perform, which is a call to the `GET /v2/_catalog` API. Assume you have the following: 1. A valid container registry, which here we'll call `contosoregistry.azurecr.io`. 2. A valid ACR refresh token. The first thing you want is to obtain an authentication challenge for the operation you want to on the Azure Container Registry. That can be done by targetting the API you want to call without any authentication. Here's how to do that via `curl`: ```bash registry="contosoregistry.azurecr.io" curl -v https://$registry/v2/_catalog ``` Note that `curl` by default does the request as a `GET` unless you specify a different verb with the `-X` modifier. This will output the following payload, with `...` used to shorten it for illustrative purposes: ```html < HTTP/1.1 401 Unauthorized ... < Www-Authenticate: Bearer realm="https://contosoregistry.azurecr.io/oauth2/token",service="contosoregistry.azurecr.io",scope="registry:catalog:*" ... 
{"errors":[{"code":"UNAUTHORIZED","message":"authentication required","detail":[{"Type":"registry","Name":"catalog","Action":"*"}]}]} ``` Notice the response payload has a header called `Www-Authenticate` that gives us the following information: - The type of challenge: `Bearer`. - The realm of the challenge: `https://contosoregistry.azurecr.io/oauth2/token`. - The service of the challenge: `contosoregistry.azurecr.io`. - The scope of the challenge: `registry:catalog:*`. The body of the payload might provide additional details, but all the information you need is contained in the `Www-Authenticate` header. With this information we're now ready to call `POST /oauth2/token` to obtain an ACR access token that will allow us to use the `GET /v2/_catalog` API. Here's how such a call looks when done via `curl`: ```bash registry="contosoregistry.azurecr.io" acr_refresh_token="eyJ...L7a" scope="registry:catalog:*" curl -v -X POST -H "Content-Type: application/x-www-form-urlencoded" -d \ "grant_type=refresh_token&service=$registry&scope=$scope&refresh_token=$acr_refresh_token" \ https://$registry/oauth2/token ``` The body of the POST message is a querystring-like text that specifies the following values: - `grant_type` which is expected to be `refresh_token`. - `service`, which must indicate the name of your Azure container registry. You obtained this from the `Www-Authenticate` response header from the challenge. - `scope`, which is expected to be a valid [scope](https://docs.docker.com/registry/spec/auth/scope/), and can be specified more than once for multiple scope requests. You obtained this from the `Www-Authenticate` response header from the challenge. - `refresh_token`, which must be a valid ACR refresh token, as obtained by calling `POST /oauth2/exchange`. 
The outcome of this operation will be a response with status 200 OK and a body with the following JSON payload: ```json {"access_token":"eyJ...xcg"} ``` This response is the ACR access token which you can inspect with [jwt.ms](https://jwt.ms/). You can now use it to call APIs exposed by the Azure Container Registry ## Calling `POST /oauth2/token` to get an ACR access token for Helm repository In this example, we'll try to obtain an ACR access token from existing ACR refresh token to access Helm repository, and this access token will only work for the operation we're trying to perform, which is a call to the `GET /helm/v1/repo/index.yaml` API. Assume you have the following: 1. A valid container registry, which here we'll call `contosoregistry.azurecr.io`. 2. A valid ACR refresh token. The first thing you want is to obtain an authentication challenge for the operation you want to on the Azure Container Registry. That can be done by targetting the API you want to call without any authentication. Here's how to do that via `curl`: ```bash registry="contosoregistry.azurecr.io" curl -v https://$registry/helm/v1/repo/index.yaml ``` Note that `curl` by default does the request as a `GET` unless you specify a different verb with the `-X` modifier. This will output the following payload, with `...` used to shorten it for illustrative purposes: ```bash < HTTP/1.1 401 Unauthorized ... < Www-Authenticate: Bearer realm="https://contosoregistry.azurecr.io/oauth2/token",service="contosoregistry.azurecr.io",scope="artifact-repository:repo:pull" ... {"errors":[{"code":"UNAUTHORIZED","message":"authentication required","detail":[{"Type":"artifact-repository","Name":"repo","Action":"pull"}]}]} ``` Notice the response payload has a header called `Www-Authenticate` that gives us the following information: - The type of challenge: `Bearer`. - The realm of the challenge: `https://contosoregistry.azurecr.io/oauth2/token`. - The service of the challenge: `contosoregistry.azurecr.io`. 
- The scope of the challenge: `artifact-repository:repo:pull`. The body of the payload might provide additional details, but all the information you need is contained in the `Www-Authenticate` header. With this information we're now ready to call `POST /oauth2/token` to obtain an ACR access token that will allow us to use the `GET /helm/v1/repo/index.yaml` API. Here's how such a call looks when done via `curl`: ```bash registry="contosoregistry.azurecr.io" acr_refresh_token="eyJ...L7a" scope="artifact-repository:repo:pull" curl -v -X POST -H "Content-Type: application/x-www-form-urlencoded" -d \ "grant_type=refresh_token&service=$registry&scope=$scope&refresh_token=$acr_refresh_token" \ https://$registry/oauth2/token ``` The body of the POST message is a querystring-like text that specifies the following values: - `grant_type` which is expected to be `refresh_token`. - `service`, which must indicate the name of your Azure container registry. You obtained this from the `Www-Authenticate` response header from the challenge. - `scope`, which is expected to be a `artifact-repository:repo:pull` for read operations and `artifact-repository:repo:*` for write operations, and can be specified more than once for multiple scope requests. You obtained this from the `Www-Authenticate` response header from the challenge. - `refresh_token`, which must be a valid ACR refresh token, as obtained by calling `POST /oauth2/exchange`. The outcome of this operation will be a response with status 200 OK and a body with the following JSON payload: ```json {"access_token":"eyJ...xcg"} ``` This response is the ACR access token which you can inspect with [jwt.ms](https://jwt.ms/). You can now use it to call APIs exposed by the Azure Container Registry. Refer the full script to [fetch the helm index.yaml](#fetch-helm-indexyaml). ## Calling an Azure Container Registry API In this example we'll call catalog listing and tag listing APIs on an Azure Container Registry. 
### Catalog Listing Assume you have the following: 1. A valid container registry, which here we'll call `contosoregistry.azurecr.io`. 2. A valid ACR access token, created with the correct scope for the API we're going to call. Here's how a call to the `GET /v2/_catalog` API of the given registry would look like when done via `curl`: ```bash registry="contosoregistry.azurecr.io" acr_access_token="eyJ...xcg" curl -v -H "Authorization: Bearer $acr_access_token" https://$registry/v2/_catalog ``` Note that `curl` by default does the request as a `GET` unless you specify a different verb with the `-X` modifier. This should result in a status 200 OK, and a body with a JSON payload listing the repositories held in this registry: ```json {"repositories":["alpine","contoso-marketing","hello-world","node"]} ``` #### Pagination To retrieve paginated catalog results, add an `n` parameter to limit the number or results. We take `n=2` as example: ```bash registry="contosoregistry.azurecr.io" acr_access_token="eyJ...xcg" limit=2 curl -v -H "Authorization: Bearer $acr_access_token" "https://$registry/v2/_catalog?n=$limit" ``` This should result in a status 200 OK, and a body with a JSON payload listing the first `n` repositories held in this registry. If there are more results, a `Link` header containing the request URL for the next result block is also returned. If the entire result set has been received, the `Link` header will not be returned. In this case, the first 2 repositories are returned, and there are more entries in the result set. The response would look like: ```http < HTTP/1.1 200 OK ... Content-Type: application/json Link: ; rel="next" {"repositories": ["alpine","contoso-marketing"]} ``` To get the next result block, issue the request using the `/v2/_catalog?last=contoso-marketing&n=2&orderby=` URL encoded in the `Link` header. 
Here is how the call would look like: ```bash curl -v -H "Authorization: Bearer $acr_access_token" "https://$registry/v2/_catalog?last=contoso-marketing&n=2&orderby=" ``` You can query the paginated results in a loop, as the following shows: ```bash registry="contosoregistry.azurecr.io" acr_access_token="eyJ...xcg" limit=2 operation=/v2/_catalog?n=$limit headers=$(mktemp -t headers.XXXXX) while [ -n "$operation" ] do echo "Operation" echo $operation catalog=$(curl -H "Authorization: Bearer $acr_access_token" "https://$registry$operation" -D $headers) echo "Catalog" echo $catalog operation=$(cat $headers | sed -n 's/^Link: <\(.*\)>.*/\1/p') done rm $headers ``` For more information, visit [Docker V2 API Reference - Listing Repositories](https://docs.docker.com/registry/spec/api/#listing-repositories). ### Tag Listing Assume you have the following: 1. A valid container registry, which here we'll call `contosoregistry.azurecr.io`. 2. A valid ACR access token, created with the correct scope for the API we're going to call. 3. A valid image in the registry, for example `hello-world`. Here's how a call to the `GET /v2//tags/list` API of the given image would look like when done via `curl`: ```bash registry="contosoregistry.azurecr.io" acr_access_token="eyJ...xcg" image="hello-world" curl -v -H "Authorization: Bearer $acr_access_token" "https://$registry/v2/$image/tags/list" ``` Note that `curl` by default does the request as a `GET` unless you specify a different verb with the `-X` modifier. This should result in a status 200 OK, and a body with a JSON payload listing the tags of this image: ```json {"name": "hello-world","tags": ["latest","v1","v2","v3"]} ``` #### Pagination To retrieve paginated tag results, add an `n` parameter to limit the number or results. 
We take `n=2` as example: ```bash registry="contosoregistry.azurecr.io" acr_access_token="eyJ...xcg" image="hello-world" limit=2 curl -v -H "Authorization: Bearer $acr_access_token" "https://$registry/v2/$image/tags/list?n=$limit" ``` This should result in a status 200 OK, and a body with a JSON payload listing the first `n` tags of this image. If there are more results, a `Link` header containing the request URL for the next result block is also returned. If the entire result set has been received, the `Link` header will not be returned. In this case, the first 2 tags are returned, and there are more entries in the result set. The response would look like: ```http < HTTP/1.1 200 OK ... Content-Type: application/json Link: ; rel="next" {"name":"hello-world","tags":["latest","v1"]} ``` To get the next result block, issue the request using the `/v2/hello-world/tags/list?last=v1&n=2&orderby=` URL encoded in the `Link` header. Here is how the call would look like: ```bash curl -v -H "Authorization: Bearer $acr_access_token" "https://$registry/v2/$image/tags/list?last=v1&n=2&orderby=" ``` You can query the paginated results in a loop, as the following shows: ```bash registry="contosoregistry.azurecr.io" acr_access_token="eyJ...xcg" image="hello-world" limit=2 operation=/v2/$image/tags/list?n=$limit headers=$(mktemp -t headers.XXXXX) while [ -n "$operation" ] do echo "Operation" echo $operation tags=$(curl -H "Authorization: Bearer $acr_access_token" "https://$registry$operation" -D $headers) echo "Tags" echo $tags operation=$(cat $headers | sed -n 's/^Link: <\(.*\)>.*/\1/p') done rm $headers ``` For more information, visit [Docker V2 API Reference - Listing Image Tags](https://docs.docker.com/registry/spec/api/#listing-image-tags). ## Samples API Call scripts This is a summary script of the points discussed above. The first three variables have to be filled out. - Variable `registry` can be something like `"contosoregistry.azurecr.io"`. 
- The AAD access token and AAD refresh token values can be obtained from the Azure CLI, after running `az login` check file `$HOME/.azure/accessTokens.json` (`%HOMEDRIVE%%HOMEPATH%\.azure\accessTokens.json` in Windows) for the token values. Note that a stale AAD tokens will result in this script failing to obtain an ACR refresh token, and therefore it won't succeed in obtaining an ACR access token or in executing the operation against the registry. ### Catalog Listing with AAD refresh token ```bash #!/bin/bash registry=" --- you have to fill this out --- " aad_refresh_token=" --- you have to fill this out --- " aad_access_token=" --- you have to fill this out --- " operation="/v2/_catalog" acr_refresh_token=$(curl -s -X POST -H "Content-Type: application/x-www-form-urlencoded" -d "grant_type=access_token_refresh_token&service=$registry&refresh_token=$aad_refresh_token&access_token=$aad_access_token" https://$registry/oauth2/exchange | jq '.refresh_token' | sed -e 's/^"//' -e 's/"$//') echo "ACR Refresh Token" echo $acr_refresh_token challenge=$(curl -vs https://$registry$operation 2>&1 | grep "Www-Authenticate:") echo "Challenge" echo $challenge scope=$(echo $challenge | egrep -o 'scope=\"([^\"]*)\"' | egrep -o '\"([^\"]*)\"' | sed -e 's/^"//' -e 's/"$//') echo "Scope" echo $scope acr_access_token=$(curl -s -X POST -H "Content-Type: application/x-www-form-urlencoded" -d "grant_type=refresh_token&service=$registry&scope=$scope&refresh_token=$acr_refresh_token" https://$registry/oauth2/token | jq '.access_token' | sed -e 's/^"//' -e 's/"$//') echo "ACR Access Token" echo $acr_access_token catalog=$(curl -s -H "Authorization: Bearer $acr_access_token" https://$registry$operation) echo "Catalog" echo $catalog ``` ### Catalog listing using SP/Admin with Basic Auth Here's an equivalent set of scripts that will allow you to execute an operation against an Azure Container Registry, but this time using only the admin credentials, and not AAD. 
If you'd like to use basic auth, you can do a direct call to the registry like this: ```bash #!/bin/bash registry=" --- you have to fill this out --- " user=" --- you have to fill this out --- " password=" --- you have to fill this out --- " operation="/v2/_catalog" credentials=$(echo -n "$user:$password" | base64 -w 0) catalog=$(curl -s -H "Authorization: Basic $credentials" https://$registry$operation) echo "Catalog" echo $catalog ``` ### Catalog listing using Admin Keys with Bearer Auth If you'd like to use bearer auth, you have to first convert your admin credentials to an ACR access token like this: ```bash #!/bin/bash registry=" --- you have to fill this out --- " user=" --- you have to fill this out --- " password=" --- you have to fill this out --- " operation="/v2/_catalog" challenge=$(curl -vs https://$registry$operation 2>&1 | grep "Www-Authenticate:") echo "Challenge" echo $challenge scope=$(echo $challenge | egrep -o 'scope=\"([^\"]*)\"' | egrep -o '\"([^\"]*)\"' | sed -e 's/^"//' -e 's/"$//') echo "Scope" echo $scope credentials=$(echo -n "$user:$password" | base64 -w 0) acr_access_token=$(curl -s -H "Content-Type: application/x-www-form-urlencoded" -H "Authorization: Basic $credentials" "https://$registry/oauth2/token?service=$registry&scope=$scope" | jq '.access_token' | sed -e 's/^"//' -e 's/"$//') echo "ACR Access Token" echo $acr_access_token catalog=$(curl -s -H "Authorization: Bearer $acr_access_token" https://$registry$operation) echo "Catalog" echo $catalog ``` ### Docker login with ACR Access Token - Single repository scope The following script uses an AAD token to request an 'ACR access token` which can be used as a docker login credential. 
```bash #/bin/sh set -e REGISTRY=" --- you have to fill this out --- " REPOSITORY=" --- you have to fill this out --- " AAD_ACCESS_TOKEN=$(az account get-access-token --query accessToken -o tsv) ACR_REFRESH_TOKEN=$(curl -s -X POST -H "Content-Type: application/x-www-form-urlencoded" \ -d "grant_type=access_token&service=$REGISTRY&access_token=$AAD_ACCESS_TOKEN" \ https://$REGISTRY/oauth2/exchange \ | jq '.refresh_token' \ | sed -e 's/^"//' -e 's/"$//') echo "ACR Refresh Token obtained." # Create the repo level scope SCOPE="repository:$REPOSITORY:pull" # to pull multiple repositories passing in multiple scope arguments. #&scope="repository:repo:pull,push" ACR_ACCESS_TOKEN=$(curl -s -X POST -H "Content-Type: application/x-www-form-urlencoded" \ -d "grant_type=refresh_token&service=$REGISTRY&scope=$SCOPE&refresh_token=$ACR_REFRESH_TOKEN" \ https://$REGISTRY/oauth2/token \ | jq '.access_token' \ | sed -e 's/^"//' -e 's/"$//') echo "ACR Access Token obtained." # Docker Login using the ACR_ACCESS_TOKEN echo docker login into $REGISTRY docker login -u 00000000-0000-0000-0000-000000000000 -p $ACR_ACCESS_TOKEN $REGISTRY docker pull $REGISTRY/$REPOSITORY ``` ### Fetch helm index.yaml with Admin Keys or SP with Basic Auth ```bash #!/bin/bash registry=" --- you have to fill this out --- " user=" --- you have to fill this out --- " password=" --- you have to fill this out --- " operation="/helm/v1/repo/index.yaml" challenge=$(curl -vs https://$registry$operation 2>&1 | grep "Www-Authenticate:") echo "Challenge" echo $challenge scope=$(echo $challenge | egrep -o 'scope=\"([^\"]*)\"' | egrep -o '\"([^\"]*)\"' | sed -e 's/^"//' -e 's/"$//') echo "Scope" echo $scope credentials=$(echo -n "$user:$password" | base64 -w 0) acr_access_token=$(curl -s -H "Content-Type: application/x-www-form-urlencoded" \ -H "Authorization: Basic $credentials" "https://$registry/oauth2/token?service=$registry&scope=$scope" | jq '.access_token' | sed -e 's/^"//' -e 's/"$//') echo "ACR Access Token" echo 
$acr_access_token #Retrieve the location header and strip the trailing \r for curl URL=$(curl -sD - -H "Authorization: Bearer $acr_access_token" https://$registry$operation | grep -Fi Location | awk '{print $2}' | tr -d '\r') echo Location=$URL echo index.yaml echo ---------- curl $URL ``` ================================================ FILE: docs/FAQ.md ================================================ # Azure Container Registry - Frequently Asked Questions This article has moved to [Microsoft Docs](https://docs.microsoft.com/azure/container-registry/container-registry-faq). ================================================ FILE: docs/README.md ================================================ --- title: Overview type: post --- ## Overview This repo contains [issues](https://github.com/Azure/acr/issues), [samples](./docs), [troubleshooting tips](./docs/Troubleshooting%20Guide.md), and a collection of links for Azure Container Registry. ## Blog posts * [Choosing a Docker Container Registry](https://stevelasker.blog/2018/11/14/choosing-a-docker-container-registry/) * [Key Differences between VM and Container Vulnerability Scanning](https://stevelasker.blog/2018/06/27/key-differences-between-vm-and-container-vulnerability-scanning/) * [Working with Geo-replication notifications](https://stevelasker.blog/2018/01/29/working-with-acr-geo-replication-notifications/) * [User Accounts](https://stevelasker.blog/2016/11/17/azure-container-registry-user-accounts/) * [Docker Tagging Best Practices](https://stevelasker.blog/2018/03/01/docker-tagging-best-practices-for-tagging-and-versioning-docker-images/) * [Deploying Docker Images to Azure Container Instances](https://stevelasker.blog/2017/07/28/deploying-docker-images-from-the-azure-container-registry-to-azure-container-instances/) ## Links See [ACR Links](../README.md/#links) ================================================ FILE: docs/Token-BasicAuth.md ================================================ --- type: post title: 
"Token with Basic Auth" --- # Azure Container Registry's support of getting Bearer token using Basic Authentication The Azure Container Registry supports both Basic Authentication and OAuth2 for getting a registry Bearer token. This document describes how to get a Bearer token using Basic Authentication. To get the token using OAuth2, please refer to the [AAD-OAuth doc](https://github.com/Azure/acr/blob/master/docs/AAD-OAuth.md). ## Using the token API ACR has implemented the GET method on the token endpoint for user to retrieve a Bearer token using Basic Authentication: GET /oauth2/token ### Get the scope of the token to be requested The first thing you want is to obtain an authentication challenge for the operation you want to on the Azure Container Registry. That can be done by targetting the API you want to call without any authentication. Here's how to do that via `curl`: ```bash export registry="contosoregistry.azurecr.io" curl -v https://$registry/v2/hello-world/manifests/latest ``` Note that `curl` by default does the request as a `GET` unless you specify a different verb with the `-X` modifier. This will output the following payload, with `...` used to shorten it for illustrative purposes: ```bash < HTTP/1.1 401 Unauthorized ... < Www-Authenticate: Bearer realm="https://contosoregistry.azurecr.io/oauth2/token",service="contosoregistry.azurecr.io",scope="repository:hello-world:pull" ... {"errors":[{"code":"UNAUTHORIZED","message":"authentication required","detail":[{"Type":"repository","Name":"hello-world","Action":"pull"}]}]} ``` Notice the response payload has a header called `Www-Authenticate` that gives us the following information: - The type of challenge: `Bearer`. - The realm of the challenge: `https://contosoregistry.azurecr.io/oauth2/token`. - The service of the challenge: `contosoregistry.azurecr.io`. - The scope of the challenge: `repository:hello-world:pull`. 
The body of the payload might provide additional details, but all the information you need is contained in the `Www-Authenticate` header. With this information we're now ready to call `GET /oauth2/token` to obtain an ACR access token that will allow us to use the `GET /v2/hello-world/manifests/latest` API. ### Encode the username and password - You can use Windows Powershell or `base64` command line utility in Linux/Mac - Encode using the following format: **[username]**:**[password]** - Powershell: - `[convert]::ToBase64String([Text.Encoding]::UTF8.GetBytes('[username]:[password]'))` - Linux/Mac Terminal: - `echo -n '[username]:[password]' | base64` - Copy the encoded value and set it as a environment variable ```bash export acr_credential="xxxxxxx" ``` ### Get a Pull access token for the user **REST format:** `https://`**[login-url]**`/oauth2/token?service=`**[login-url]**`&scope=repository:`**[image]**`:pull,push` Set the header for Authorization, setting the 'Basic' word followed by a space and the encoded usr:pwd value |Header | Value | |-------|-------| | Authorization | Basic [base64 encoded usr:pwd] | | Host | [login-url] | Here's how such a call looks when done via `curl`: ```bash export registry="contosoregistry.azurecr.io" export scope="repository:hello-world:pull" curl -v -H "Authorization: Basic $acr_credential" \ "https://$registry/oauth2/token?service=$registry&scope=$scope" ``` The outcome of this operation will be a response with status 200 OK and a body with the following JSON payload: ```json {"access_token":"eyJ...xcg"} ``` This response is the ACR access token which you can inspect with [jwt.ms](https://jwt.ms/). You can now use it to call APIs exposed by the Azure Container Registry. ### Calling an Azure Container Registry API In this example we'll call the `GET /v2/{repository}/manifests/{tag}` API on an Azure Container Registry. Assume you have the following: 1. A valid container registry, which here we'll call `contosoregistry.azurecr.io`. 
2. A valid ACR access token, created with the correct scope for the API we're going to call. Here's how a call to the `GET /v2/hello-world/manifests/latest` API of the given registry would look when done via `curl`: ```bash export registry="contosoregistry.azurecr.io" export acr_access_token="eyJ...xcg" curl -v -H "Authorization: Bearer $acr_access_token" -H "Accept:application/vnd.oci.image.manifest.v1+json" https://$registry/v2/hello-world/manifests/latest ``` This should result in a status 200 OK. ================================================ FILE: docs/Troubleshooting Guide.md ================================================ # Azure Container Registry - Troubleshooting guide ## I get an error while creating a registry - "Unregistered Subscription specified" You need to register the subscription using Powershell: ``` Register-AzureRmResourceProvider -ProviderNamespace Microsoft.ContainerRegistry ``` Az CLI: ``` az provider register -n Microsoft.ContainerRegistry ``` ## I'm able to create a registry in one region but not in another region As we add more regions, the service in a new region needs to know about your subscription. 
So please register your subscription again so that the ACR service in newer regions will know about your subscription. See [here](#registersub) ## Azure CLI - I get this error - No resource with type Microsoft.ContainerRegistry/registries can be found with name Please run this command and check if you have set the right subscription ``` az account show ``` Please run this command to set the correct subscription ``` az account set --subscription <subscription-id> ``` ## Azure CLI - Not able to use az cli to query/view my registries See [this](#registersub) and [this](#setcorrectsub) ## Image exists in my ACR but docker pull returns "image not found" Please make sure you log in before you pull/push repositories ``` docker login <registry-name>.azurecr.io ``` ## Configuring a custom domain for azure container registry Azure container registries have a typical login url of the format `*.azurecr.io`. A customer might like to use a custom domain for the registry. Follow [this guide](custom-domain/README.md) to achieve that. ## Moving repositories to a new registry To move your repositories to a newly created registry, follow [this guide](move-repositories-to-new-registry/README.md). ## Failed to add a virtual network from a different Azure subscription If you want to restrict registry access using a virtual network in a different Azure subscription, you will see the following error if the subscription hasn't registered the `Microsoft.ContainerRegistry` resource provider: ``` Failed to save firewall and virtual network settings for container registry 'MyRegistry'. 
Error: Could not validate network rule - The client '00000000-0000-0000-0000-000000000000' with object id '00000000-0000-0000-0000-000000000000' does not have authorization to perform action 'Microsoft.Network/virtualNetworks/taggedTrafficConsumers/validate/action' over scope '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MyRG/providers/Microsoft.Network/virtualNetworks/MyRegistry/taggedTrafficConsumers/Microsoft.ContainerRegistry' or the scope is invalid. If access was recently granted, please refresh your credentials. ``` You need to register the resource provider for Azure Container Registry in that subscription. For example: Azure CLI ``` az account set --subscription <subscription-id> az provider register --namespace Microsoft.ContainerRegistry ``` ## Check role assignments on a registry ``` az role assignment list --scope /subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.ContainerRegistry/registries/<registry-name> ``` See [here](https://docs.microsoft.com/cli/azure/role/assignment?view=azure-cli-latest#az-role-assignment-list) for reference ================================================ FILE: docs/acr-roadmap.md ================================================ # Azure Container Registry Roadmap Visit [ACR Public Roadmap](https://github.com/orgs/Azure/projects/259) to see what we are building next. Please note that we do not communicate specific dates for delivery. We also do not commit to delivery items outside of our rolling 6-12 month window. ## Helping with Prioritization Have a request, or wish we were doing something? Please provide your feedback and ranking to help us understand your needs and priority through [UserVoice][uservoice]. 
[uservoice]: https://aka.ms/acr/uservoice ================================================ FILE: docs/aks-acr-across-tenants.md ================================================ # Set up AKS to pull from ACR in a different AD tenant ## Introduction There are several ways to set up the auth credential in Kubernetes to pull an image from ACR. For example, you can use [admin user or repository scoped access token](https://docs.microsoft.com/en-us/azure/aks/kubernetes-service-principal) to configure pod [imagePullSecrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). While `imagePullSecrets` is commonly used, it brings the challenge and overhead to manage the corresponding secret. On Azure, you can set up an [AKS cluster with a service principal credential](https://docs.microsoft.com/en-us/azure/aks/kubernetes-service-principal) which allows you to securely pull the image from ACR without an additional `imagePullSecrets` setting on each pod. Sometimes, you may have your AKS and ACR in different Azure Active Directories (Tenants). This document will walk you through the steps to enable cross tenant authentication using a service principal credential. ## Instruction In this example, the AKS cluster is in `Tenant A` and the ACR is in `Tenant B`. `Tenant A` is also the service principal home tenant. You will need the contributor role of the AKS subscription and the owner role of the ACR subscription. ### Step 1: Enable multi-tenant AAD Application - Login to the [Azure portal](http://portal.azure.com/) in `Tenant A` and go to the Azure Active Directory `App registrations` blade to find the service principal application object. - Remember the `Application (client) ID` (it will be used in `step 2` and `step 4`) ![](./media/multi-tenant-app.png) - Choose the multitenant account type as shown in the following screenshot and also remember the `redirect url` (it will be used in step 2). 
![](./media/enable-multi-tenant-app.png) - Create a client secret if one does not exist (It is __IMPORTANT__ to make sure you use this client secret to update AKS in `step 4`). ![](./media/aad-app-client-secret.png) ### Step 2: Provision the service principal in ACR Tenant - Open the following link with the Tenant B admin account and accept the permission request. ``` https://login.microsoftonline.com/<tenant-B-id>/oauth2/authorize?client_id=<application-client-id>&response_type=code&redirect_uri=<redirect-url> ``` ![](./media/multi-tenant-app-consent.png) ### Step 3: Grant service principal ACR image pull permission - Assign the AcrPull role to the service principal ![](./media/multi-tenant-app-acr-pull.png) ### Step 4: Update AKS with the AAD Application secret - Use the `Application (client) ID` and `client secret` collected in `step 1` to [update AKS service principal credential](https://docs.microsoft.com/en-us/azure/aks/update-credentials#update-aks-cluster-with-new-service-principal-credentials). ## Reference - [Application and service principal objects in Azure Active Directory](https://docs.microsoft.com/en-us/azure/active-directory/develop/app-objects-and-service-principals) ================================================ FILE: docs/artifact-media-types.json ================================================ { "application/vnd.docker.distribution.manifest.v2+json": "Docker images", "application/vnd.cncf.helm.chart.config.v1+json": "Helm charts", "application/vnd.oci.image.config.v1+json": "OCI images", "application/vnd.cncf.openpolicyagent.config.v1+json": "Open Policy Agent bundles", "application/vnd.sylabs.sif.config.v1+json": "Singularity images" } ================================================ FILE: docs/blog/abac-repo-permissions.md ================================================ --- title: Introducing Azure Container Registry Repository Permissions through Attribute-Based Access Control (Private Preview) description: Learn about the new Repository Permissions feature for Azure Container Registry during the 
private preview. The feature ensures secure and efficient repository permissions management for Azure Container Registry. ms.topic: whats-new #Don't change. ms.date: 08/12/2024 ms.author: johsh author: johnsonshi ms.service: container-registry --- # What's New: Manage Repository Permissions for Azure Container Registry through Attribute-Based Access Control (ABAC) > [!NOTE] > The Repository Permissions feature for Azure Container Registry is currently in private preview. For details on enrolling in the Private Preview and to ensure a smooth experience, please follow the provided instructions. If you're looking to stay updated with the latest enhancements in Azure Container Registry (ACR), particularly in managing repository permissions, this article is for you. We are excited to announce the private preview of managing repository permissions in ACR in Azure role assignments, a feature that transforms how you manage access to your repositories. Azure Attribute-Based Access Control (ABAC) allows for more granular repository-level permissions during Azure role assignments with Entra identities. During Azure Entra role assignments, role permissions can be scoped to specific repositories within a registry rather than granting permissions to the entire registry. This feature improves the security footprint by ensuring permissions are precisely assigned according to your needs. Understanding the new ACR ABAC Repository Permissions will help you optimize your workflow and enhance your security measures. So, let's dive in and explore what's new! ## Azure Attribute-Based Access Control (ABAC) capabilities Azure Attribute-Based Access Control (ABAC) builds on top of Azure RBAC by allowing repository conditions during Azure Entra role assignments for ACR. 
- **Condition-based Role Assignments**: Azure ABAC lets you [specify repository conditions for Azure Entra role assignments](https://learn.microsoft.com/en-us/azure/role-based-access-control/conditions-overview), scoping role permissions to specific repositories based on repository name conditions. - **Repository Name Conditions**: You can grant access to repositories matching certain prefixes or exact names, tailoring permissions to your organizational needs. - **Compatibility with Roles**: ABAC conditions work with both [built-in ACR roles](https://learn.microsoft.com/en-us/azure/container-registry/container-registry-roles) and custom role assignments, providing flexibility in repository permission management during Azure Entra role assignments. - **Identity Support**: ABAC Repository Permissions support various Microsoft Entra ID identities, including users, groups, service principals, and managed identities, ensuring comprehensive access control for all role assignment scenarios. - **SKU Support**: All ACR SKUs support ABAC, making it available across different service levels. This feature is a significant step towards more secure and precise access management within Azure Container Registry. ## Related content For private preview onboarding and documentation, please visit [Attribute-Based Access Control for Azure Container Registry Repository Permissions (Private Preview)](../preview/abac-repo-permissions/README.md). ================================================ FILE: docs/blog/connected-registry.md ================================================ --- title: Connected Registry Private Preview description: Private preview for ACR connected registry feature. ms.topic: post ms.date: 01/05/2021 ms.author: memladen author: toddysm ms.custom: --- ## Private Preview - ACR Connected Registry Feature We are announcing the private preview of the Azure Container Registry (ACR) connected registry feature. 
The connected registry feature of ACR allows you to deploy a registry on your premises and synchronize images between the ACR and your premises. It brings the container images and OCI artifacts closer to your container workloads on premises and increases their acquisition performance. ACR connected registry can be used in conjunction with [Azure IoT Edge](https://azure.microsoft.com/services/iot-edge/), [Azure Arc](https://azure.microsoft.com/en-us/services/azure-arc/), [Azure Stack](https://azure.microsoft.com/overview/azure-stack/) as well as other edge container workloads. Connected registry is currently in limited preview. To request preview access, submit your contact details using this [form](https://forms.office.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR1OsLxas9SdIhfyFenqqkolUMkFKMTdDSU45SFQzU0o0WUNROVAySkRINy4u). For private preview documentation, please visit [Connected Registry Private Preview Details](../preview/connected-registry/README.md). --- Toddy Mladenov ================================================ FILE: docs/blog/teleport.md ================================================ > [!NOTE] > Please visit [aka.ms/acr/artifact-streaming](https://aka.ms/acr/artifact-streaming). --- type: post title: "Overview" excerpt: "project teleport" tags: [developers, teleport] date: 2019-11-01 17:00:00 author: Steve Lasker --- # Azure Container Registry Adds Teleportation ![](https://stevelaskerblog.files.wordpress.com/2019/10/image-8.png?w=132) Instancing a custom environment within seconds is one of the many wonders of running containers. Having to wait for the image and its layers to download & decompress the first time is the _current_ price of admission. > Project Teleport removes the cost of download and decompression by SMB mounting pre-expanded layers from the Azure Container Registry to Teleport enabled Azure container hosts. ## Teleportation Performance The following table represents initial performance metrics across different image sizes. 
The amount of time to teleport an image has less to do with the size of the image, but rather the number of layers that must be mounted. This is an area of performance we’ll continue to focus upon. ![](https://stevelaskerblog.files.wordpress.com/2019/10/teleportmetricslayers.png?w=1024) > Our opportunistic goal for Project Teleport is 90% of locally cached images. We’re not considering teleportation of organic material, as 90% would not be _quite_ good enough. Being able to pull any custom image, to any serverless host, at 90% of the startup time, seems pretty good. Especially considering it’s a 100% unattainable goal of having every custom image on every serverless host. > > Steve Lasker – Program Manager – Azure Container Registries ## How Project Teleport Optimizes Registry Operations While Docker didn’t invent containers, they did provide a highly productive end to end experience for building, pushing, discovering, pulling and instancing containers. Container registries are one of the innovations that provide content addressable objects through a collection of layers. ![](https://stevelaskerblog.files.wordpress.com/2019/10/dockerimagepull.png?w=1024) The underlying container flow today involves: 1. pulling an image, which calls a REST endpoint, returning a collection of layer IDs 2. comparing the local cache, determining the delta of layers that must be retrieved 3. requesting secured URLs for each layer ID 4. pulling each layer 5. decompressing each layer 6. instancing the container when all the layers are available This flow works well for internet protocols where the network is comparatively unreliable and slower than an intra-datacenter network. For a reasonably sized image, it’s faster to serve compressed blobs, decompressing on the client, than waiting for larger payloads to fight with YouTube, Netflix and a million other packets traveling across the wild internet. 
When running within a controlled datacenter, the network is reliable and fast, while CPU, disk speed and memory become the bottleneck for pulling complete layers and decompressing them before usage. When using dedicated hosts, such as VMs provisioned for Kubernetes, pulling the first image is painful, but subsequent image pulls benefit from the pre-cached layers. As clouds move to “serverless” environments, where the hosts are dynamically allocated, each container run is a new environment. Cloud providers pre-cache the most common base layers, but the hit ratio varies across each service and time, as newer versions are continually released. This creates an inconsistent experience, detracting from the value of serverless. ## Highly Factored Registry Protocol The designers of the [distribution-spec](https://github.com/opencontainers/distribution-spec/) and [image-spec](https://github.com/opencontainers/image-spec) created a highly factored protocol that enables cloud providers to adhere to a public spec, with the flexibility to implement cloud specific storage and authentication solutions. Project Teleport takes advantage of this factoring by adhering to the public API that container developers are accustomed to, while providing cloud specific optimizations. [](https://stevelaskerblog.files.wordpress.com/2019/10/telportimagepull.png?w=1024) Project Teleport assumes the image pull runs within an optimized environment. The underlying Teleport flow is slightly, but very impactfully different, involving: 1. pulling an image, which involves a REST endpoint that returns the collection of layer IDs 2. comparing the local cache, determining the delta of layers that must be retrieved 3. **_requesting [Azure Premium File](https://azure.microsoft.com/en-us/blog/announcing-the-general-availability-of-azure-premium-files/) mount points for each layer ID_** 4. **_[SMB](https://en.wikipedia.org/wiki/Server_Message_Block) mounting each layer as pre-expanded content_** 5. 
instancing the container when all the layers are available The benefits of Project Teleport include: * when using the SMB protocol, only the content read by the container is pulled across the network, speeding container start time * no decompression in the run flow, removing the additional CPU, local disk speed and memory bottlenecks * overall reduced network traffic, as only the subset of an image that’s utilized is pulled across the network * the ability to leverage local image cache information as the teleported mounts intermix with the local cache ## Orca, a Teleport Client for Azure Project Teleport is a registry transport protocol, enabling container layers to be teleported from the registry directly to a container host. Normally, you would issue docker run commands to pull and run an image. However, we need a means to plug in the teleport protocol to the container host. Project Teleport takes advantage of the [containerd snapshot plugin](https://github.com/containerd/containerd#snapshot-plugins). As containerd and the docker client evolve, we’ll simply plug in Project Teleport to a new docker client. Until that time, we provide an orca client, for a subset of docker functionality, focusing on the running of container images. For instance, container building is not yet supported. | | | |-----------------|-----------------------| | ![](https://stevelaskerblog.files.wordpress.com/2019/10/image-2.png?w=363)| ![](https://stevelaskerblog.files.wordpress.com/2019/10/image-3.png?w=245) | | Orca represents the amazing Orca species of whales, roaming the Seattle Puget Sound.| Our own Brendan Burns also has a sailboat, appropriately named Orca.| ## Previewing Teleportation with ACR Tasks While our goal is to enable Project Teleport on all Azure Services, today we are previewing Teleport with [ACR Tasks](https://aka.ms/acr/tasks). ACR Tasks provides the ability to build and run container images in a highly optimized and securely isolated environment. 
The initial Project Teleport preview focuses on running Linux images. Because ACR Tasks is a focused environment, we can preview teleportation with customer provided images without having to support a large surface area. Based on your feedback and the evolution of containerd, we’ll know when we can expand usage to other Azure services ## Running Containers With the Orca Client, Using the Teleport Transport The following two commands demonstrate running ACR Tasks with and without Project Teleport. **ACR Run:** `az acr run -r demo42 --cmd "demo42.azurecr.io/batchprocessor:1" /dev/null` The above command executes an [ACR Task](https://aka.ms/acr/task), on the `demo42` registry. The `--cmd` parameter runs the `batchprocessor` image. Like `docker run`, `acr task run` takes a positional argument that represents the context. Since we’re not passing a context, we just pass `/dev/null` **Teleporting the batchprocessor image:** `az acr run -r demo42 --cmd "**orca run** demo42.azurecr.io/batchprocessor:1" /dev/null` The above command instructs [ACR Tasks](https://aka.ms/acr/task) to use the orca client to run the `batchprocessor` image. Over time, the `--cmd` parameter will directly support Teleport enabled images, removing the need to specify `orca run`. ## Under the Hood of an Image Teleporter [](https://stevelaskerblog.files.wordpress.com/2019/10/dockerorca.png?w=1024) Within ACR, we’ve expanded support from compressed blobs, using [Azure Blob Storage](https://azure.microsoft.com/services/storage/blobs/), to [Azure Premium Files](https://azure.microsoft.com/en-us/blog/announcing-the-general-availability-of-azure-premium-files/), storing expanded layers. Each expanded layer is persisted as a [virtual hard disk (.vhd)](https://en.wikipedia.org/wiki/VHD_(file_format)) which are supported by Linux and Windows clients. 
To support standard docker clients, or any client capable of pushing a container image, ACR accepts the incoming image and checks to see if the target repository supports teleportation. If the repository is teleport enabled, an **ACR expansion service** creates a decompressed .vhd for each layer. By storing each layer as a .vhd, ACR can continue to maintain de-duping of common layers across multiple images, while maintaining repository based RBAC. When a request is made to pull an image, the orca client provides header information stating the region and whether it’s teleport enabled. If the registry is in the same region, teleport SMB mount points are returned. If the client is in a different region, a fallback to compressed blob URLs are returned. > The SMB Teleporter depends on intra-datacenter networks, limiting short range teleportation. To enable a [best practice of having images network-close to the container host](https://stevelasker.blog/2018/11/14/choosing-a-docker-container-registry/), future releases will support multi-region Teleportation through an [ACR Geo-replication](https://aka.ms/acr/geo-replication) translocator. [](https://stevelaskerblog.files.wordpress.com/2019/10/orcadocker.png?w=1024) In future releases we plan to enable ACR Task build support, teleporting base images and writing new image layers directly to the registry. As the image build completes, ACR will compress the layers into traditional blobs, enabling standard docker clients. ![](https://stevelaskerblog.files.wordpress.com/2019/10/orcaorca.png?w=1024) When paired with [ACR Task buildx caching](https://github.com/Azure/acr/blob/master/docs/Tasks/buildx/README.md), dramatic improvements from code-commit to deploy performance can be realized. 
## The Future of Container Teleportation The future of Project Teleport is broken into the following categories: * Incorporating user feedback * Improved mounting performance * Supporting all Azure services using containers * Windows containers * Building images, teleporting the base layers and writing expanded layers directly to the registry * Geo-replication translocation Thankfully, the teleport project is split across multiple teams, enabling parallelization. ## Teleporting Images Across All Azure Container Hosts Project Teleport is designed to support all container hosts, including Linux & Windows, and all Azure services. This includes AKS, ACI, Virtual Kubelet, Machine Learning, ACR Tasks and the golden serverless scenario, Azure Functions. ## Teleporting Serverless Functions When we think about serverless functions, the ability to instantly run some custom set of code becomes the holy grail. The service must scale from 0 to infinity (_and beyond_), while only charging for the actual usage. The reference to **_instant_** and **_custom code_** is the challenge. Today, serverless platforms utilize containers to host known environments for specific language runtimes. To achieve specific language runtimes, services mount user code into a pre-allocated pool of container instances. Pulling custom images is just too slow. With Project Teleport, we can now expand the environments and the languages you prefer, bringing whatever custom images you desire in near instant time. ## How Can You Teleport Your Containers? The customer feedback we get with ACR Tasks will help us improve teleportation across all Azure service hosts. We’ve been working on Teleportation since early 2018, so we’re obviously excited to hear what you think, and learn how we need to complete the scenarios. After the first round of a private preview feedback, we’ll open a public preview. 
* To Help us test teleportation of your images – [sign up here](https://aka.ms/teleport/signup) for the private preview * Are you just as excited with container scenarios, building teleporters and other [ACR roadmap capabilities](https://aka.ms/acr/roadmap)? Apply here for [ACR Jobs](https://aka.ms/acr/jobs) --- Steve Lasker ================================================ FILE: docs/container-registry-consuming-public-content.md ================================================ --- title: How to manage public content in private registry description: .... ms.service: container-registry ms.topic: article ms.date: 10/27/2020 author: stevelas --- # How to consume & maintain public content with Azure Container Registry Tasks An Azure container registry hosts your container images and other [OCI artifacts][oci-artifacts] in a private, authenticated environment. However, your environment may have dependencies on public content such as public container images, [helm charts][helm-charts], [Open Policy Agent (OPA)][opa] policies or other artifacts. For example, you might run [nginx] for service routing or `docker build FROM` [alpine][alpine-public-image] by pulling images directly from Docker Hub or another public registry. As upstream changes occur, this article will explain how to import and maintain these public artifacts. For more information about the risks introduced by dependencies on public content and best practices see the [OCI Consuming Public Content Blog post][oci-consuming-public-content]. This article covers features and workflows in Azure Container Registry to help you manage consuming and maintaining public content: * Import local copies of dependent public images. * Validate public images through security scanning and functional testing. * Promoting to private registries for internal usage. * Triggering base image updates for applications dependent upon public content. * Using [ACR Tasks](container-registry-tasks-overview.md) to automate this workflow. 
![Consuming Public Content Workflow](./media/container-registry-consuming-public-content/consuming-public-content-workflow.png) This article refers mainly to container images, but the concepts apply to other supported [registry artifacts](container-registry-image-formats.md). The gated import workflow refers to decoupling your organization's dependencies on externally managed artifacts. For instance, images sourced from public registries like: [docker hub][docker-hub], [gcr][gcr], [quay][quay], [github container registry][ghcr], [Microsoft Container Registry][mcr] or even other public [Azure Container Registries][acr]. Consider balancing these two, possibly conflicting goals: 1. Do you really want an unexpected upstream change to possibly take out your production system? 2. Do you want upstream security fixes, for the versions you depend upon, to be automatically deployed? ## Prerequisites * Create three registries to represent the workflow * A simulated copy of docker hub for public images. * This allows us to simulate a base image update, which would normally be initiated on [Docker Hub][docker-hub] or other public registries. * A development team registry that will host one or more teams that build and manage images. * **Note:** [repository based RBAC (preview)][acr-repo-permissions] is now available, enabling multiple teams to share a single registry, with unique permission sets * A registry to host imported base artifacts. * An Azure KeyVault for storing access keys to the registries * An [Azure Container Instance][aci] to host the `hello-world` image. The following steps will: 1. Configure unique values for your environment 1. Simulate a Public Registry 1. Automate building a hello-world image 1. Automate deploying to an [Azure Container Instance][aci] 1. Simulate upstream changes directly to your environment 1. 
Create a gated import, that validates upstream changes are appropriate for your environment ![import workflow components](./media/container-registry-consuming-public-content/consuming-public-content-objects.png) This walk through will: 1. Configure three registries representing: * Simulated Docker Hub (`publicregistry`) to support changing the base image * Team registry (`contoso`) for private images * Company/team shared registry (`baseartifacts`) for imported public content 2. Configure ACR Tasks to: * build the simulated public node image * import and validate the public node image to the company/team shared registry * build and deploy the hello-world image 3. ACR Task definitions, including configurations for: 4. Collection of registry credentials which can be pointers to KeyVault 5. Collection of secrets, available within an `acr-task.yaml`, which are pointers to KeyVault 6. Collection of configured values used within an `acr-task.yaml`. 7. An Azure KeyVault, securing all secrets 8. An Azure Container Instance, hosting the hello-world build application ### Set environment variables Configure variables unique to your environment. We follow best practices for placing resources with durable content in their own resource group to minimize accidental deletion; however, you can place these in a single resource group if desired. 
```azurecli # Set the three registry names, unique to your environment: REGISTRY_PUBLIC=publicregistry REGISTRY_BASE_ARTIFACTS=contosobaseartifacts REGISTRY=contoso # set the location all resources will be created in: RESOURCE_GROUP_LOCATION=eastus # default resource groups REGISTRY_PUBLIC_RG=${REGISTRY_PUBLIC}-rg REGISTRY_BASE_ARTIFACTS_RG=${REGISTRY_BASE_ARTIFACTS}-rg REGISTRY_RG=${REGISTRY}-rg # fully qualified registry urls REGISTRY_DOCKERHUB_URL=docker.io REGISTRY_PUBLIC_URL=${REGISTRY_PUBLIC}.azurecr.io REGISTRY_BASE_ARTIFACTS_URL=${REGISTRY_BASE_ARTIFACTS}.azurecr.io REGISTRY_URL=${REGISTRY}.azurecr.io # Azure KeyVault for storing secrets AKV=acr-task-credentials AKV_RG=${AKV}-rg # ACI for hosting the deployed application ACI=hello-world-aci ACI_RG=${ACI}-rg ``` ### GIT repositories and tokens To simulate your environment, fork each of these into repositories you can manage. Then, update the variables for your forked repositories. Notice `:main` concatenated to the end of the git URLs representing the default repository branch. ```azurecli GIT_BASE_IMAGE_NODE=https://github.com/importing-public-content/base-image-node.git#main GIT_NODE_IMPORT=https://github.com/importing-public-content/import-baseimage-node.git#main GIT_HELLO_WORLD=https://github.com/importing-public-content/hello-world.git#main ``` Establish a [Git Token][git-token] for ACR Tasks to clone and establish git webhooks. See: @DAN, CAN YOU UPDATE TO A REFERENCE FOR REQUIRED PERMISSIONS? 
```azurecli GIT_TOKEN= ``` Docker Hub Credentials To avoid throttling and identify requests, [create a Docker Hub token][docker-hub-tokens] ```azurecli REGISTRY_DOCKERHUB_USER= REGISTRY_DOCKERHUB_PASSWORD= ``` ### Create Resources Create the three registries: ```azurecli az group create --name $REGISTRY_PUBLIC_RG --location $RESOURCE_GROUP_LOCATION az acr create --resource-group $REGISTRY_PUBLIC_RG --name $REGISTRY_PUBLIC --sku Premium az group create --name $REGISTRY_BASE_ARTIFACTS_RG --location $RESOURCE_GROUP_LOCATION az acr create --resource-group $REGISTRY_BASE_ARTIFACTS_RG --name $REGISTRY_BASE_ARTIFACTS --sku Premium az group create --name $REGISTRY_RG --location $RESOURCE_GROUP_LOCATION az acr create --resource-group $REGISTRY_RG --name $REGISTRY --sku Premium ``` Create a KeyVault for secrets ```azurecli az group create --name $AKV_RG --location $RESOURCE_GROUP_LOCATION az keyvault create --resource-group $AKV_RG --name $AKV ``` Create a Docker Hub token To avoid throttling and identify requests, [create a Docker Hub token][docker-hub-tokens] ```azurecli az keyvault secret set \ --vault-name $AKV \ --name registry-dockerhub-user \ --value $REGISTRY_DOCKERHUB_USER az keyvault secret set \ --vault-name $AKV \ --name registry-dockerhub-password \ --value $REGISTRY_DOCKERHUB_PASSWORD ``` Set and Verify a Git token within KeyVault ```azurecli az keyvault secret set --vault-name $AKV --name github-token --value $GIT_TOKEN az keyvault secret show --vault-name $AKV --name github-token --query value -o tsv ``` Create a Resource Group for an Azure Container Instance ```azurecli az group create --name $ACI_RG --location $RESOURCE_GROUP_LOCATION ``` ### Create public node base image To simulate the node image on Docker Hub, create an [ACR Task][acr-task] to build and maintain the public image. This allows simulating changes by the node image maintainers. 
```azurecli az acr task create \ --name node-public \ -r $REGISTRY_PUBLIC \ -f acr-task.yaml \ --context $GIT_BASE_IMAGE_NODE \ --git-access-token $(az keyvault secret show \ --vault-name $AKV \ --name github-token \ --query value -o tsv) \ --set REGISTRY_FROM_URL=${REGISTRY_DOCKERHUB_URL}/ \ --assign-identity ``` To avoid Docker throttling, add [Docker Hub credentials][docker-hub-tokens]: ```azurecli az acr task credential add \ -n node-public \ -r $REGISTRY_PUBLIC \ --login-server $REGISTRY_DOCKERHUB_URL \ -u https://${AKV}.vault.azure.net/secrets/registry-dockerhub-user \ -p https://${AKV}.vault.azure.net/secrets/registry-dockerhub-password \ --use-identity [system] ``` Grant access to ACR for reading values from KeyVault ```azurecli az keyvault set-policy \ --name $AKV \ --resource-group $AKV_RG \ --object-id $(az acr task show \ --name node-public \ --registry $REGISTRY_PUBLIC \ --query identity.principalId --output tsv) \ --secret-permissions get ``` [Tasks can be triggered][acr-task-triggers] by git commits, base image updates, scheduled runs or manually executed. Run the task to generate the `node` image ```azurecli az acr task run -r $REGISTRY_PUBLIC -n node-public ``` List the image in the simulated public registry ```azurecli az acr repository show-tags -n $REGISTRY_PUBLIC --repository node ``` ## Create the hello-world image Based on the simulated public node image, build a hello-world image. 
### Create a Token for access to the "public" registry Using [ACR Tokens][acr-tokens], create access tokens, scoped to `pull` ```azurecli az keyvault secret set \ --vault-name $AKV \ --name "registry-${REGISTRY_PUBLIC}-user" \ --value "registry-${REGISTRY_PUBLIC}-user" az keyvault secret set \ --vault-name $AKV \ --name "registry-${REGISTRY_PUBLIC}-password" \ --value $(az acr token create \ --name "registry-${REGISTRY_PUBLIC}-user" \ --registry $REGISTRY_PUBLIC \ --scope-map _repositories_pull \ -o tsv \ --query credentials.passwords[0].value) ``` ### Create an ACR Token for access by ACI to pull the image A token to the registry with `hello-world` is created. Permissions are scoped to read (pull) ```azurecli az keyvault secret set \ --vault-name $AKV \ --name "registry-${REGISTRY}-user" \ --value "registry-${REGISTRY}-user" az keyvault secret set \ --vault-name $AKV \ --name "registry-${REGISTRY}-password" \ --value $(az acr token create \ --name "registry-${REGISTRY}-user" \ --registry $REGISTRY \ --repository hello-world content/read \ -o tsv \ --query credentials.passwords[0].value) ``` ### Create and maintain a `hello-world` image using ACR Tasks Simulating a public registry, which could be docker hub, provide credentials using [acr task credentials][acr-task-credentials]. Since the registry is an ACR, use the token created above. The [acr task credentials][acr-task-credentials] may be used to pass docker credentials to any registry, including Docker Hub. Within the `acr-task.yaml`, we deploy the newly built image to ACI. The resource group was created above. By calling `az container create` with only a difference in the `image:tag`, the same instance is used. 
```azurecli az acr task create \ -n hello-world \ -r $REGISTRY \ -f acr-task.yaml \ --context $GIT_HELLO_WORLD \ --git-access-token $(az keyvault secret show \ --vault-name $AKV \ --name github-token \ --query value -o tsv) \ --set REGISTRY_FROM_URL=${REGISTRY_PUBLIC_URL}/ \ --set KEYVAULT=$AKV \ --set ACI=$ACI \ --set ACI_RG=$ACI_RG \ --assign-identity ``` Add credentials for our Public Registry ```azurecli az acr task credential add \ -n hello-world \ -r $REGISTRY \ --login-server $REGISTRY_PUBLIC_URL \ -u https://${AKV}.vault.azure.net/secrets/registry-${REGISTRY_PUBLIC}-user \ -p https://${AKV}.vault.azure.net/secrets/registry-${REGISTRY_PUBLIC}-password \ --use-identity [system] ``` Grant access to read values from the KeyVault ```azurecli az keyvault set-policy \ --name $AKV \ --resource-group $AKV_RG \ --object-id $(az acr task show \ --name hello-world \ --registry $REGISTRY \ --query identity.principalId --output tsv) \ --secret-permissions get ``` Grant the task access to create and manage ACI by granting access to the resource group: ```azurecli az role assignment create \ --assignee $(az acr task show \ --name hello-world \ --registry $REGISTRY \ --query identity.principalId --output tsv) \ --scope $(az group show -n $ACI_RG --query id -o tsv) \ --role owner ``` With the task created, run the task to build/deploy the hello-world image: ```azurecli az acr task run -r $REGISTRY -n hello-world ``` Once created, browse the site hosting the `hello-world` image. ```bash explorer.exe "http://"$(az container show \ --resource-group $ACI_RG \ --name ${ACI} \ --query ipAddress.ip \ --out tsv) ``` ## Update the base image with a "bad" change Open the `Dockerfile` in base-image-node repo Change the `BACKGROUND_COLOR` to `Red` to simulate a change that would break our environment. 
```Dockerfile ARG REGISTRY_NAME= FROM ${REGISTRY_NAME}node:15-alpine ENV NODE_VERSION 15-alpine ENV BACKGROUND_COLOR Red ``` Commit the change and watch for ACR Tasks to automatically start building. Watch for the task to start executing: ```azurecli watch -n1 az acr task list-runs -r $REGISTRY_PUBLIC ``` You should eventually see STATUS `Succeeded` based on a TRIGGER of `Commit`: ```azurecli RUN ID TASK PLATFORM STATUS TRIGGER STARTED DURATION -------- -------- ---------- --------- --------- -------------------- ---------- ca4 hub-node linux Succeeded Commit 2020-10-24T05:02:29Z 00:00:22 ``` Type `CTRL-C` to exit the watch command, then view the logs for the most recent run: ```azurecli az acr task logs -r $REGISTRY_PUBLIC ``` Once the node image is completed, `watch` for ACR Tasks to automatically start the hello-world image: ```azurecli watch -n1 az acr task list-runs -r $REGISTRY ``` You should eventually see STATUS `Succeeded` based on a TRIGGER of `Image Update` ```azurecli RUN ID TASK PLATFORM STATUS TRIGGER STARTED DURATION -------- ----------- ---------- --------- ------------ -------------------- ---------- dau hello-world linux Succeeded Image Update 2020-10-24T05:08:45Z 00:00:31 ``` Type `CTRL-C` to exit the watch command, then view the logs for the most recent run: ```azurecli az acr task logs -r $REGISTRY ``` Once completed, browse the site hosting the updated `hello-world` image, which should have a red (broken) background. ```bash explorer.exe "http://"$(az container show \ --resource-group $ACI_RG \ --name ${ACI} \ --query ipAddress.ip \ --out tsv) ``` ## Checking in At this point, you've created a `hello-world` image that is automatically built on git commits, and changes to the base `node` image. While we've built against a base image in ACR, this could be any supported registry. The ACR Task base image update trigger automatically re-executes as the node image is updated. As seen here, not all updates are wanted. 
## Gated imports of public content To prevent upstream changes from breaking critical workloads, security scanning and functional tests may be added. This section covers: * Build a test image * Run a functional test script `./test.sh` against the test image * If the image tests successfully, import the public image to the **baseimages** registry ### Write automation testing To gate any upstream content, automated testing is implemented. In this example, a `test.sh` is provided which checks the `$BACKGROUND_COLOR`. If the test fails, an `EXIT_CODE` of `1` is returned which causes the ACR Task step to fail, ending the task run. The tests can be expanded with any form of tooling, including logging results. The gate is managed by a pass/fail response. ```bash if [ "$(echo $BACKGROUND_COLOR | tr '[:lower:]' '[:upper:]')" = 'RED' ]; then echo -e "\e[31mERROR: Invalid Color:\e[0m" ${BACKGROUND_COLOR} EXIT_CODE=1 else echo -e "\e[32mValidation Complete - No Known Errors\e[0m" fi exit ${EXIT_CODE} ``` The `acr-task.yaml` performs the following steps: * Build the test base image using the following dockerfile: ```dockerfile ARG REGISTRY_FROM_URL= FROM ${REGISTRY_FROM_URL}node:15-alpine WORKDIR /test COPY ./test.sh . CMD ./test.sh ``` * When completed, validate the image by running the container, which runs `./test.sh` * Only if successfully completed, run the import steps, which are gated with `when: ['validate-base-image']` ```yaml version: v1.1.0 steps: - id: build-test-base-image # Build off the base image we'll track # Add a test script to do unit test validations # Note: the test validation image isn't saved to the registry # but the task log captures the validation results build: > --build-arg REGISTRY_FROM_URL={{.Values.REGISTRY_FROM_URL}} -f ./Dockerfile -t {{.Run.Registry}}/node-import:test . 
- id: validate-base-image # subsequent steps only continue if node-import:test returns a zero (success) exit code when: ['build-test-base-image'] cmd: "{{.Run.Registry}}/node-import:test" - id: pull-base-image # import the public image to base-artifacts # Override the stable tag, # and create a unique tag to enable rollback # to a previously working image when: ['validate-base-image'] cmd: > docker pull {{.Values.REGISTRY_FROM_URL}}node:15-alpine - id: retag-base-image when: ['pull-base-image'] cmd: docker tag {{.Values.REGISTRY_FROM_URL}}node:15-alpine {{.Run.Registry}}/node:15-alpine - id: retag-base-image-unique-tag when: ['pull-base-image'] cmd: docker tag {{.Values.REGISTRY_FROM_URL}}node:15-alpine {{.Run.Registry}}/node:15-alpine-{{.Run.ID}} - id: push-base-image when: ['retag-base-image', 'retag-base-image-unique-tag'] push: - "{{.Run.Registry}}/node:15-alpine" - "{{.Run.Registry}}/node:15-alpine-{{.Run.ID}}" ``` Create an ACR Task to import and test the node base image ```azurecli az acr task create \ --name base-import-node \ -f acr-task.yaml \ -r $REGISTRY_BASE_ARTIFACTS \ --context $GIT_NODE_IMPORT \ --git-access-token $(az keyvault secret show \ --vault-name $AKV \ --name github-token \ --query value -o tsv) \ --set REGISTRY_FROM_URL=${REGISTRY_PUBLIC_URL}/ \ --assign-identity ``` Add credentials for our public registry ```azurecli az acr task credential add \ -n base-import-node \ -r $REGISTRY_BASE_ARTIFACTS \ --login-server $REGISTRY_PUBLIC_URL \ -u https://${AKV}.vault.azure.net/secrets/registry-${REGISTRY_PUBLIC}-user \ -p https://${AKV}.vault.azure.net/secrets/registry-${REGISTRY_PUBLIC}-password \ --use-identity [system] ``` Grant access to read values from the KeyVault ```azurecli az keyvault set-policy \ --name $AKV \ --resource-group $AKV_RG \ --object-id $(az acr task show \ --name base-import-node \ --registry $REGISTRY_BASE_ARTIFACTS \ --query identity.principalId --output tsv) \ --secret-permissions get ``` Run the import task: ```azurecli az acr task run -n 
base-import-node -r $REGISTRY_BASE_ARTIFACTS ``` If the task fails due to `./test.sh: Permission denied` assure the script has execution permissions and commit back to the git repo: ```bash chmod +x ./test.sh ``` ## Update the hello-world image to build from the gated node image Add an `AcrPull` token to access the base-artifacts registry ```azurecli az keyvault secret set \ --vault-name $AKV \ --name "registry-${REGISTRY_BASE_ARTIFACTS}-user" \ --value "registry-${REGISTRY_BASE_ARTIFACTS}-user" az keyvault secret set \ --vault-name $AKV \ --name "registry-${REGISTRY_BASE_ARTIFACTS}-password" \ --value $(az acr token create \ --name "registry-${REGISTRY_BASE_ARTIFACTS}-user" \ --registry $REGISTRY_BASE_ARTIFACTS \ --repository node content/read \ -o tsv \ --query credentials.passwords[0].value) ``` Add credentials for our Public Registry ```azurecli az acr task credential add \ -n hello-world \ -r $REGISTRY \ --login-server $REGISTRY_BASE_ARTIFACTS_URL \ -u https://${AKV}.vault.azure.net/secrets/registry-${REGISTRY_BASE_ARTIFACTS}-user \ -p https://${AKV}.vault.azure.net/secrets/registry-${REGISTRY_BASE_ARTIFACTS}-password \ --use-identity [system] ``` Change the REGISTRY_FROM_URL to use the BASE_ARTIFACTS registry ```azurecli az acr task update \ -n hello-world \ -r $REGISTRY \ --set KEYVAULT=$AKV \ --set REGISTRY_FROM_URL=${REGISTRY_BASE_ARTIFACTS_URL}/ \ --set ACI=$ACI \ --set ACI_RG=$ACI_RG ``` Run the hello-world task to change its base image dependency ```azurecli az acr task run -r $REGISTRY -n hello-world ``` ## Update the base image with a "valid" change Open the `Dockerfile` in base-image-node repo Change the `BACKGROUND_COLOR` to `Green` to simulate a valid change. 
```Dockerfile ARG REGISTRY_NAME= FROM ${REGISTRY_NAME}node:15-alpine ENV NODE_VERSION 15-alpine ENV BACKGROUND_COLOR Green ``` Commit the change and monitor the sequence of updates ```azurecli watch -n1 az acr task list-runs -r $REGISTRY_PUBLIC ``` Once running, `ctrl+C` and monitor the logs ```azurecli az acr task logs -r $REGISTRY_PUBLIC ``` Once complete, monitor the base-image-import task ```azurecli watch -n1 az acr task list-runs -r $REGISTRY_BASE_ARTIFACTS ``` Once running, `ctrl+C` and monitor the logs ```azurecli az acr task logs -r $REGISTRY_BASE_ARTIFACTS ``` Once complete, monitor the hello-world task ```azurecli watch -n1 az acr task list-runs -r $REGISTRY ``` Once running, `ctrl+C` and monitor the logs ```azurecli az acr task logs -r $REGISTRY ``` Once complete, view the ACI hello-world image. ```bash explorer.exe "http://"$(az container show \ --resource-group $ACI_RG \ --name ${ACI} \ --query ipAddress.ip \ --out tsv) ``` ### View the gated workflow Perform the above steps again, with a background color of red Open the `Dockerfile` in base-image-node repo Change the `BACKGROUND_COLOR` to `Red` to simulate an invalid change. ```Dockerfile ARG REGISTRY_NAME= FROM ${REGISTRY_NAME}node:15-alpine ENV NODE_VERSION 15-alpine ENV BACKGROUND_COLOR Red ``` Commit the change and monitor the sequence of updates ```azurecli watch -n1 az acr task list-runs -r $REGISTRY_PUBLIC ``` Once running, `ctrl+C` and monitor the logs ```azurecli az acr task logs -r $REGISTRY_PUBLIC ``` Once complete, monitor the base-image-import task ```azurecli watch -n1 az acr task list-runs -r $REGISTRY_BASE_ARTIFACTS ``` Once running, `ctrl+C` and monitor the logs ```azurecli az acr task logs -r $REGISTRY_BASE_ARTIFACTS ``` At this point, you should see base-import-node fail validation and stop the sequence to publish a hello-world update. ### Publish an update to hello-world Changes to the hello-world image will continue using the last validated node image. 
Any additional changes to the base-node image that pass the gated validations will trigger base-updates to the hello-world image. ## Cleaning up ```azurecli az group delete -n $REGISTRY_RG --no-wait -y az group delete -n $REGISTRY_PUBLIC_RG --no-wait -y az group delete -n $REGISTRY_BASE_ARTIFACTS_RG --no-wait -y az group delete -n $AKV_RG --no-wait -y az group delete -n $ACI_RG --no-wait -y ``` ## Next steps * [Adopt tagging scheme for base image updates](container-registry-image-tag-version.md) * [Build images from stable service tags - can continue to receive security patches and framework updates.](container-registry-image-tag-version.md) * [Protect images using Image/tag locking](container-registry-image-lock.md) [acr]: https://aka.ms/acr [acr-repo-permissions]: https://aka.ms/acr/repo-permissions [acr-task]: https://aka.ms/acr/tasks [acr-task-triggers]: https://docs.microsoft.com/en-us/azure/container-registry/container-registry-tasks-overview#task-scenarios [acr-task-credentials]: https://docs.microsoft.com/en-us/azure/container-registry/container-registry-tasks-authentication-managed-identity#4-optional-add-credentials-to-the-task [acr-tokens]: https://aka.ms/acr/tokens [aci]: https://aka.ms/aci [alpine-public-image]: https://hub.docker.com/_/alpine [docker-hub]: https://hub.docker.com [docker-hub-tokens]: https://hub.docker.com/settings/security [git-token]: https://github.com/settings/tokens [gcr]: https://cloud.google.com/container-registry [ghcr]: https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry/about-github-container-registry [helm-charts]: https://helm.sh [mcr]: https://aka.ms/mcr [nginx-public-image]: https://hub.docker.com/_/nginx [oci-artifacts]: https://aka.ms/acr/artifacts [oci-consuming-public-content]: https://docs.google.com/document/d/1fxayMznIkszBI9Y2S3KGSyi2hFMwUIwDfn3D2wQcye4/edit?usp=sharing [opa]: https://www.openpolicyagent.org/ [quay]: https://quay.io 
================================================ FILE: docs/container-registry-oras-artifacts.md ================================================ --- title: Push and pull Supply Chain Artifacts description: Push and pull supply chain artifacts, using a private container registry in Azure author: SteveLasker manager: gwallace ms.topic: article ms.date: 11/11/2021 ms.author: stevelas --- # Push and pull supply chain artifacts, using a private container registry in Azure (Preview) Use an Azure container registry to store and manage a graph of artifacts, including signatures, software bill of materials, security scan results or other types. ![](./media/container-registry-artifacts/oras-artifact-graph.svg) To demonstrate this capability, this article shows how to use the [OCI Registry as Storage (ORAS)](https://oras.land) tool to push and pull a graph of artifacts to an Azure container registry. ## Prerequisites * **Azure container registry** - Create a container registry in your Azure subscription. During the preview of ORAS Artifacts support, the registry must be created in specific regions. * **ORAS CLI** - The ORAS CLI enables push, discover, pull of artifacts to an ORAS Artifacts enabled registry. * **Azure CLI** - To create an identity, list and delete repositories, you need a local installation of the Azure CLI. Version 2.29.1 or later is recommended. Run `az --version` to find the version. If you need to install or upgrade, see [Install Azure CLI](/cli/azure/install-azure-cli). * **Docker (optional)** - To complete the walkthrough, a container image is referenced. You can use Docker installed locally to build and push a container image, or reference an existing container image. Docker provides packages that easily configure Docker on any [macOS][docker-mac], [Windows][docker-windows], or [Linux][docker-linux] system. ## Preview limitations ORAS Artifacts support is limited to the South Central US region, with Availability Zone support. 
* Geo-replicated registries will not replicate referenced artifacts to other regions. As additional regions support ORAS Artifacts, the referenced artifacts will be replicated. ## ORAS installation Download and install a preview ORAS release for your operating system. See [ORAS Install instructions][oras-install-docs] for how to extract and install the file for your operating system, referencing an Alpha.1 preview build from the [ORAS GitHub repo][oras-preview-install] ## Configure a private registry Configure environment variables to easily copy/paste commands into your shell. The commands can be run in the [Azure Cloud Shell](https://shell.azure.com/) ```console ACR_NAME=myregistry REGISTRY=$ACR_NAME.azurecr.io REPO=net-monitor TAG=v1 IMAGE=$REGISTRY/${REPO}:$TAG ``` ### Create a resource group If needed, run the [az group create](/cli/azure/group#az_group_create) command to create a resource group for the registry. ```azurecli az group create --name $ACR_NAME --location southcentralus ``` ### Create ORAS Artifact enabled registry Preview support for ORAS Artifacts requires Zone Redundancy, which requires a Premium service tier, in the South Central US region. Run the [az acr create](/cli/azure/acr#az_acr_create) command to create an ORAS Artifacts enabled registry. See the `az acr create` command help for more registry options. ```azurecli az acr create \ --resource-group $ACR_NAME \ --name $ACR_NAME \ --zone-redundancy enabled \ --sku Premium \ --output jsonc ``` In the command output, note the `zoneRedundancy` property for the registry. When enabled, the registry is zone redundant, and ORAS Artifact enabled: ```JSON { [...] "zoneRedundancy": "Enabled", } ``` ### Sign in with Azure CLI [Sign in](/cli/azure/authenticate-azure-cli) to the Azure CLI with your identity to push and pull artifacts from the container registry. Then, use the Azure CLI command [az acr login](/cli/azure/acr#az_acr_login) to access the registry. 
```azurecli az login az acr login --name $ACR_NAME ``` > [!NOTE] > `az acr login` uses the Docker client to set an Azure Active Directory token in the `docker.config` file. The Docker client must be installed and running to complete the individual authentication flow. ## Sign in with ORAS This section shows options to sign into the registry. Choose the method appropriate for your environment. Run `oras login` to authenticate with the registry. You may pass [registry credentials](container-registry-authentication.md) appropriate for your scenario, such as service principal credentials, user identity, or a repository-scoped token (preview). - Authenticate with your [individual Azure AD identity](container-registry-authentication.md?tabs=azure-cli#individual-login-with-azure-ad) to use an AD token. ```bash USER_NAME="00000000-0000-0000-0000-000000000000" PASSWORD=$(az acr login --name $ACR_NAME --expose-token --output tsv --query accessToken) ``` - Authenticate with a [repository scoped token](container-registry-repository-scoped-permissions.md) (Preview) to use non-AD based tokens. ```bash USER_NAME="oras-token" PASSWORD=$(az acr token create -n $USER_NAME \ -r $ACR_NAME \ --repository $REPO content/write \ --only-show-errors \ --query "credentials.passwords[0].value" -o tsv) ``` - Authenticate with an Azure Active Directory [service principal with pull and push permissions](container-registry-auth-service-principal.md#create-a-service-principal) (AcrPush role) to the registry. ```bash SERVICE_PRINCIPAL_NAME="oras-sp" ACR_REGISTRY_ID=$(az acr show --name $ACR_NAME --query id --output tsv) PASSWORD=$(az ad sp create-for-rbac --name $SERVICE_PRINCIPAL_NAME \ --scopes $(az acr show --name $ACR_NAME --query id --output tsv) \ --role acrpush \ --query "password" --output tsv) USER_NAME=$(az ad sp list --display-name $SERVICE_PRINCIPAL_NAME --query "[].appId" --output tsv) ``` ### Sign in with ORAS Supply the credentials to `oras login`. 
```bash oras login $REGISTRY \ --username $USER_NAME \ --password $PASSWORD ``` To read the password from Stdin, use `--password-stdin`. ## Push a container image This example associates a graph of artifacts to a container image. Build and push a container image, or reference an existing image in the private registry. ```bash docker build -t $IMAGE https://github.com/wabbit-networks/net-monitor.git#main docker push $IMAGE ``` ## Create a sample signature to the container image ```bash echo '{"artifact": "'${IMAGE}'", "signature": "pat hancock"}' > signature.json ``` ### Push a signature to the registry, as a reference to the container image The ORAS command pushes the signature to a repository, referencing another artifact through the `subject` parameter. The `--artifact-type` provides for differentiating artifacts, similar to file extensions enable different file types. One or more files can be pushed by specifying `file:mediaType` ```bash oras push $REGISTRY/$REPO \ --artifact-type 'signature/example' \ --subject $IMAGE \ ./signature.json:application/json ``` For more information on oras push, see [ORAS documentation][oras-push-docs]. ## Push a multi-file artifact as a reference Create some documentation around an artifact ```bash echo 'Readme Content' > readme.md echo 'Detailed Content' > readme-details.md ``` Push the multi-file artifact as a reference ```bash oras push $REGISTRY/$REPO \ --artifact-type 'readme/example' \ --subject $IMAGE \ ./readme.md:application/markdown \ ./readme-details.md:application/markdown ``` ## Discovering artifact references The ORAS Artifacts Specification defines a [referrers API][oras-artifacts-referrers] for discovering references to a `subject` artifact. The `oras discover` command can show the list of references to the container image. 
Using `oras discover`, view the graph of artifacts now stored in the registry ```bash oras discover -o tree $IMAGE ``` The output shows the beginning of a graph of artifacts, where the signature and docs are viewed as children of the container image ```output myregistry.azurecr.io/net-monitor:v1 ├── signature/example │   └── sha256:555ea91f39e7fb30c06f3b7aa483663f067f2950dcb... └── readme/example └── sha256:1a118663d1085e229ff1b2d4d89b5f6d67911f22e55... ``` ## Creating a deep graph of artifacts The ORAS Artifacts specification enables deep graphs, enabling signed software bill of materials (SBoM) and other artifact types. ### Create a sample SBoM ```bash echo '{"version": "0.0.0.0", "artifact": "'${IMAGE}'", "contents": "good"}' > sbom.json ``` ### Push a sample SBoM to the registry ```bash oras push $REGISTRY/$REPO \ --artifact-type 'sbom/example' \ --subject $IMAGE \ ./sbom.json:application/json ``` ### Sign the SBoM Artifacts that are pushed as references typically do not have tags, as they are considered part of the subject artifact. To push a signature to an artifact that is a child of another artifact, use the `oras discover` with `--artifact-type` filtering to find the digest. ```bash SBOM_DIGEST=$(oras discover -o json \ --artifact-type sbom/example \ $IMAGE | jq -r ".references[0].digest") ``` Create a signature of an SBoM ```bash echo '{"artifact": "'$REGISTRY/${REPO}@$SBOM_DIGEST'", "signature": "pat hancock"}' > sbom-signature.json ``` ### Push the SBoM signature ```bash oras push $REGISTRY/$REPO \ --artifact-type 'signature/example' \ --subject $REGISTRY/$REPO@$SBOM_DIGEST \ ./sbom-signature.json:application/json ``` ### View the graph ```bash oras discover -o tree $IMAGE ``` Generates the following output: ```output myregistry.azurecr.io/net-monitor:v1 ├── signature/example │   └── sha256:555ea91f39e7fb30c06f3b7aa483663f067f2950dcb... ├── readme/example │   └── sha256:1a118663d1085e229ff1b2d4d89b5f6d67911f22e55... 
└── sbom/example └── sha256:4280eef9adb632b42cf200e7cd5a822a456a558e4f3142da6b... └── signature/example └── sha256:a31ab875d37eee1cca68dbb14b2009979d05594d44a075bdd7... ``` ## Pull the Docs To pull a referenced type, the digest of reference is discovered with the `oras discover` command ```bash DOC_DIGEST=$(oras discover -o json \ --artifact-type 'readme/example' \ $IMAGE | jq -r ".references[0].digest") ``` ### Create a clean directory for downloading ```bash mkdir ./download ``` ### Pull the docs into the download directory ```bash oras pull -a -o ./download $REGISTRY/$REPO@$DOC_DIGEST ``` ### View the docs ```bash ls ./download ``` ## View the repository and tag listing ORAS Artifacts enables artifact graphs to be pushed, discovered, pulled and copied without having to assign tags. This enables a tag listing to focus on the artifacts users think about, as opposed to the signatures and SBoMs that are associated with the container images, helm charts and other artifacts. ### View a list of tags ```azurecli az acr repository show-tags \ -n $ACR_NAME \ --repository $REPO \ -o jsonc ``` ### View a list of manifests A repository can have a list of manifests that are both tagged and untagged ```azurecli az acr repository show-manifests \ -n $ACR_NAME \ --repository $REPO \ --detail -o jsonc ``` Note the container image manifests have `"tags":` ```json { "architecture": "amd64", "changeableAttributes": { "deleteEnabled": true, "listEnabled": true, "readEnabled": true, "writeEnabled": true }, "configMediaType": "application/vnd.docker.container.image.v1+json", "createdTime": "2021-11-12T00:18:54.5123449Z", "digest": "sha256:a0fc570a245b09ed752c42d600ee3bb5b4f77bbd70d8898780b7ab4...", "imageSize": 2814446, "lastUpdateTime": "2021-11-12T00:18:54.5123449Z", "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "os": "linux", "tags": [ "v1" ] } ``` The signature is untagged, but tracked as a `oras.artifact.manifest` reference to the container image ```json { 
"changeableAttributes": { "deleteEnabled": true, "listEnabled": true, "readEnabled": true, "writeEnabled": true }, "createdTime": "2021-11-12T00:19:10.987156Z", "digest": "sha256:555ea91f39e7fb30c06f3b7aa483663f067f2950dcbcc0b0d...", "imageSize": 85, "lastUpdateTime": "2021-11-12T00:19:10.987156Z", "mediaType": "application/vnd.cncf.oras.artifact.manifest.v1+json" } ``` ## Delete all artifacts in the graph Support for the ORAS Artifacts specification enables deleting the graph of artifacts associated with the root artifact. Use the [az acr repository delete][az-acr-repository-delete] command to delete the signature, SBoM and the signature of the SBoM. ```bash az acr repository delete \ -n $ACR_NAME \ -t ${REPO}:$TAG -y ``` ### View the remaining manifests ```azurecli az acr repository show-manifests \ -n $ACR_NAME \ --repository $REPO \ --detail -o jsonc ``` ## Next steps * Learn more about [the ORAS cli](https://oras.land) * Learn more about [ORAS Artifacts][oras-artifacts] for how to push, discover, pull, copy a graph of supply chain artifacts [docker-linux]: https://docs.docker.com/engine/installation/#supported-platforms [docker-mac]: https://docs.docker.com/docker-for-mac/ [docker-windows]: https://docs.docker.com/docker-for-windows/ [oras-install-docs]: https://oras.land/cli/ [oras-preview-install]: https://github.com/oras-project/oras/releases/tag/v0.2.1-alpha.1 [oras-push-docs]: https://oras.land/cli/1_pushing/ [oras-artifacts]: https://github.com/oras-project/artifacts-spec/ [az-acr-repository-show]: /cli/azure/acr/repository?#az_acr_repository_show [az-acr-repository-delete]: /cli/azure/acr/repository#az_acr_repository_delete ================================================ FILE: docs/contributing-to-pages.md ================================================ # Instructions to get started ## Prerequisites ### YARN Install `vuepress` globally using yarn. Here are the instructions for debian which can be used for WSL as well. 
https://yarnpkg.com/en/docs/install#debian-stable ```sh curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list sudo apt update && sudo apt install yarn ``` > Make sure you have yarn in your path. ## Vuepress Install Vuepress with yarn. ```sh yarn global add vuepress ``` # To view the pages ```sh cd ./docs/ vuepress dev . ``` # Publish content https://v1.vuepress.vuejs.org/guide/deploy.html#github-pages ```sh cd docs vuepress build . cd gh-pages git init git add -A git commit -m 'deploy' git push -f git@github.com:Azure/acr.git master:gh-pages ``` ================================================ FILE: docs/custom-domain/README.md ================================================ # Using Custom Domains with Azure Container Registry **Important - Using a custom domain in Azure Container Registry is a private preview feature.** **If your registry has already been enabled for a custom domain and you need support, please open an issue in this repository.** Every ACR is accessed using its login server. If you have a registry called `myregistry`, you access it using its default hostname, `myregistry.azurecr.io` (in Azure Public Cloud.) As a customer belonging to an organization, you may prefer to access your registry using a custom domain that is associated with your organization, for instance, `container-registry.contoso.com`. The following steps describe how you can achieve this. **The following sections describe preparation steps for the private preview. THESE STEPS ARE NOT SUFFICIENT TO ENABLE A CUSTOM DOMAIN FOR YOUR REGISTRY WITHOUT ACCEPTANCE INTO THE PRIVATE PREVIEW.** ## Prerequisites - [Azure CLI](https://docs.microsoft.com/cli/azure/?view=azure-cli-latest): version 2.4.0 or higher - Consider using [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/overview) - A _premium_ Azure Container Registry. 
See [here](https://docs.microsoft.com/azure/container-registry/container-registry-get-started-azure-cli) for instructions on how to create one. - Your custom domain names. The following two are required: - Custom registry domain to access the registry REST API. Example for the `contoso.com` domain: `container-registry.contoso.com` - Custom data domain to access the registry content. Again, example for `contoso.com`: `eastus-registry-data.contoso.com` - Note that the custom data domain is region specific. For geo-replicated registries, each region should have its own custom data endpoint. For each domain, you must prepare a single PEM formatted file containing the TLS private key and the public certificate: ``` -----BEGIN PRIVATE KEY----- XXXXXX -----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- XXXXXX -----END CERTIFICATE----- ``` If you use a certificate bundle, prepare a single PEM formatted file containing the TLS private key and each public certificate: ``` -----BEGIN PRIVATE KEY----- XXXXXX -----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- XXXXX-01 -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- XXXXXX-02 -----END CERTIFICATE----- [etc.] 
``` For example, using [openssl](https://github.com/openssl/openssl): - Create a self-signed public cert and private key ```shell openssl req -nodes -x509 -newkey rsa:4096 \ -keyout container-registry.contoso.com.key.pem \ -out container-registry.contoso.com.cert.pem -days 365 \ -subj '/CN=container-registry.contoso.com/O=Contoso./C=US' \ -addext "subjectAltName = DNS:container-registry.contoso.com" ``` - Create a single file containing both the public certificate (or certificates, in the case of a certificate bundle) and private key ```shell cat container-registry.contoso.com.key.pem \ >> container-registry-contoso-com.pem cat container-registry.contoso.com.cert.pem \ >> container-registry-contoso-com.pem ``` - For each data domain, follow the same steps above to prepare the PEM formatted files containing the public certificate and private key. Azure Key Vault allows you to [create](https://docs.microsoft.com/azure/key-vault/certificate-scenarios) Certificate Authority (CA) signed certificates. - If you choose to use the Azure Portal to create the certificates, be sure to select certificate content type as PEM. ## Prepare your existing registry We will enable two features on your registry: - Data Endpoints:\ This feature provides a dedicated endpoint for downloading content from your registry. If you have a registry in East US, on enabling this feature, a data endpoint is automatically created for you: `myregistry.eastus.data.azurecr.io` - ACR Managed Identities:\ Managed Identities provide a mechanism to associate an Azure Active Directory identity with your registry, while relieving you of the burden of managing credentials. To learn more, see the documentation [here](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview).\ ACR supports both user assigned and system assigned managed identities. ### Enable data endpoints and managed identities 1. `az login` 2. `az account set -s ` 3. 
`az acr update --data-endpoint-enabled true -n myregistry` 4. You can either enable a system assigned managed identity, a user assigned managed identity, or both for your registry. We recommend using system assigned managed identity to enable advanced scenarios with virtual networks that, although not supported currently, are [coming soon](#enhanced-security-with-virtual-networks). Do _one_ of the following: - To enable only system assigned managed identity: - `az acr identity assign -n myregistry --identities [system]` - To enable user assigned managed identity, with or without a system identity: - Create a user assigned managed identity following the instructions [here](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-manage-ua-identity-portal). - Do _one_ of the following: - To enable _only_ user assigned managed identity: - `az acr identity assign -n myregistry --identities ""` - To enable _both_ user and system assigned managed identities: - `az acr identity assign -n myregistry --identities "" [system]` ## Prepare your Azure Key Vault For each domain, its TLS private key and public certificate pair must be added to an Azure Key Vault that is accessible by your registry as a single PEM formatted file. We recommend creating a new key vault containing only your TLS certificates and granting the registry's identity access to `get` secret. 1. [Create](https://docs.microsoft.com/azure/key-vault/) a new Azure Key Vault. 2. [Add](https://docs.microsoft.com/azure/key-vault/certificate-scenarios) your certificates to the key vault. 3. Add an access policy to the key vault that grants your registry's identity access to `get` secret:\ `az keyvault set-policy --name --secret-permissions get --spn ` - The output of the command to enable managed identities on the registry will contain the principal ids of the assigned identities. 
- Alternatively, you may obtain the principal ids using `az cli`: - For system assigned managed identity: - `az acr show -n myregistry --query identity.principalId -o tsv` - For user assigned managed identities, you may list them as follows and use the desired principal ID: - `az acr show -n myregistry --query identity.userAssignedIdentities` For greater isolation, we recommend that you put each certificate in its own key vault and set its access policy independently. The registry should always have access to the key vault secrets. ### Certificate updates and rotation You have two options for updating the certificates used for custom domains: * **Automatic updates** - If you reference a custom domain certificate with a [non-versioned](https://docs.microsoft.com/azure/key-vault/general/about-keys-secrets-certificates#objects-identifiers-and-versioning) secret ID, the registry regularly checks the key vault and automatically uses the latest certificate version there for its operations. To rotate or update a custom domain certificate, upload the new certificate version to the secret's location in the key vault. The registry automatically uses the latest certificate version within a short time. > NOTE: after the certificate is updated, the registry may serve a mix of the old and new certificate versions for up to 15 minutes until all caches have been refreshed. * **Manual updates** - If you reference a domain certificate with a [versioned](https://docs.microsoft.com/azure/key-vault/general/about-keys-secrets-certificates#objects-identifiers-and-versioning) secret ID, the registry does not configure automatic certificate rotation. After you upload a new certificate version to the key vault, the certificate must be manually rotated in the registry. Contact [Azure Support](https://azure.microsoft.com/support/create-ticket/). 
### Enhanced security with Virtual Networks If you restrict the access of Azure Key Vault to a specific virtual network, you need to [grant access to trusted Azure services](https://learn.microsoft.com/en-us/azure/key-vault/general/overview-vnet-service-endpoints#grant-access-to-trusted-azure-services) which allow Azure Container Registry service to download the certificate. ## Prepare your DNS zone 1. The custom registry domain must have a CNAME record with the target registry login server:\ `container-registry.contoso.com` --> `myregistry.azurecr.io` 2. The regional custom data domain must have a CNAME record with the target regional registry data endpoint:\ `eastus-registry-data.contoso.com` --> `myregistry.eastus.data.azurecr.io` - The output of the command to enable data endpoints on the registry will contain the regional data endpoint. ## Contact us As a final step, share the following with us by creating a support ticket ([Azure Support](https://aka.ms/azuresupport)): * Custom registry domain details * custom registry domain (container-registry.contoso.com) * key vault secret ID of the corresponding TLS data * client ID of the user assigned registry identity that has access to this secret (not required in case of system assigned) * Custom data domain details * regional custom data domain (eastus-registry-data.contoso.com) * key vault secret ID of the corresponding TLS data * client ID of the user assigned registry identity that has access to this secret (not required in case of system assigned) ================================================ FILE: docs/custom-domain/deprecated/docker-vm-deploy/azuredeploy.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "newStorageAccountName": { "type": "string", "metadata": { "description": "Unique DNS Name for the Storage Account where the Virtual Machine's disks will be 
placed." } }, "adminUsername": { "type": "string", "metadata": { "description": "Username for the Virtual Machine." } }, "adminPassword": { "type": "securestring", "metadata": { "description": "Password for the Virtual Machine." } }, "dnsNameForVM": { "type": "string", "metadata": { "description": "Unique DNS Name for the Public IP used to access the Virtual Machine." } }, "ubuntuOSVersion": { "type": "string", "defaultValue": "14.04.4-LTS", "metadata": { "description": "The Ubuntu version for deploying the Docker containers. This will pick a fully patched image of this given Ubuntu version. Allowed values: 14.04.4-LTS, 15.10, 16.04.0-LTS" }, "allowedValues": [ "14.04.4-LTS", "15.10", "16.04.0-LTS" ] }, "newVmName": { "type": "string", "metadata": { "description": "Name of the new VM to create" } }, "vaultName": { "type": "string", "metadata": { "description": "Name of Key Vault that has a secret" } }, "vaultResourceGroup": { "type": "string", "metadata": { "description": "Resource Group of Key Vault that has a secret" } }, "secretUrlWithVersion": { "type": "string", "metadata": { "description": "Url of the certificate in Key Vault" } }, "certThumbPrint": { "type": "string", "metadata": { "description": "Thumb print for the key for above url" } }, "dnsFrontEnd": { "type": "string", "metadata": { "description": "DNS for the front end service." } }, "backendRegistry": { "type": "string", "metadata": { "description": "Azure container registry serving as backend." } }, "caCertUrl": { "type": "string", "defaultValue": "", "metadata": { "description": "URL for the signing certificate authority cert. 
If any" } } }, "variables": { "imagePublisher": "Canonical", "imageOffer": "UbuntuServer", "OSDiskName": "osdiskfordockersimple", "nicName": "[concat(parameters('newVmName'), 'NIC')]", "addressPrefix": "10.0.0.0/16", "subnetName": "Subnet", "subnetPrefix": "10.0.0.0/24", "storageAccountType": "Standard_LRS", "publicIPAddressName": "[concat(parameters('newVmName'), 'PublicIPD')]", "publicIPAddressType": "Dynamic", "vmStorageAccountContainerName": "vhds", "vmSize": "Standard_F1", "virtualNetworkName": "[concat(parameters('newVmName'), 'VNET')]", "vnetID": "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]", "subnetRef": "[concat(variables('vnetID'),'/subnets/',variables('subnetName'))]" }, "resources": [ { "type": "Microsoft.Storage/storageAccounts", "name": "[parameters('newStorageAccountName')]", "apiVersion": "2015-05-01-preview", "location": "[resourceGroup().location]", "properties": { "accountType": "[variables('storageAccountType')]" } }, { "apiVersion": "2015-05-01-preview", "type": "Microsoft.Network/publicIPAddresses", "name": "[variables('publicIPAddressName')]", "location": "[resourceGroup().location]", "properties": { "publicIPAllocationMethod": "[variables('publicIPAddressType')]", "dnsSettings": { "domainNameLabel": "[parameters('dnsNameForVM')]" } } }, { "apiVersion": "2015-05-01-preview", "type": "Microsoft.Network/virtualNetworks", "name": "[variables('virtualNetworkName')]", "location": "[resourceGroup().location]", "properties": { "addressSpace": { "addressPrefixes": [ "[variables('addressPrefix')]" ] }, "subnets": [ { "name": "[variables('subnetName')]", "properties": { "addressPrefix": "[variables('subnetPrefix')]" } } ] } }, { "apiVersion": "2015-05-01-preview", "type": "Microsoft.Network/networkInterfaces", "name": "[variables('nicName')]", "location": "[resourceGroup().location]", "dependsOn": [ "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]", 
"[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]" ], "properties": { "ipConfigurations": [ { "name": "ipconfig1", "properties": { "privateIPAllocationMethod": "Dynamic", "publicIPAddress": { "id": "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]" }, "subnet": { "id": "[variables('subnetRef')]" } } } ] } }, { "apiVersion": "2015-05-01-preview", "type": "Microsoft.Compute/virtualMachines", "name": "[parameters('newVmName')]", "location": "[resourceGroup().location]", "dependsOn": [ "[concat('Microsoft.Storage/storageAccounts/', parameters('newStorageAccountName'))]", "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]" ], "properties": { "hardwareProfile": { "vmSize": "[variables('vmSize')]" }, "osProfile": { "computerName": "[parameters('newVmName')]", "adminUsername": "[parameters('adminUsername')]", "adminPassword": "[parameters('adminPassword')]", "secrets": [ { "sourceVault": { "id": "[resourceId(parameters('vaultResourceGroup'), 'Microsoft.KeyVault/vaults', parameters('vaultName'))]" }, "vaultCertificates": [ { "certificateUrl": "[parameters('secretUrlWithVersion')]" } ] } ] }, "storageProfile": { "imageReference": { "publisher": "[variables('imagePublisher')]", "offer": "[variables('imageOffer')]", "sku": "[parameters('ubuntuOSVersion')]", "version": "latest" }, "osDisk": { "name": "osdisk1", "vhd": { "uri": "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',variables('OSDiskName'),'.vhd')]" }, "caching": "ReadWrite", "createOption": "FromImage" } }, "networkProfile": { "networkInterfaces": [ { "id": "[resourceId('Microsoft.Network/networkInterfaces',variables('nicName'))]" } ] } } }, { "type": "Microsoft.Compute/virtualMachines/extensions", "name": "[concat(parameters('newVmName'),'/docker')]", "apiVersion": "2015-05-01-preview", "location": "[resourceGroup().location]", "dependsOn": [ 
"[concat('Microsoft.Compute/virtualMachines/', parameters('newVmName'))]" ], "properties": { "publisher": "Microsoft.Azure.Extensions", "type": "DockerExtension", "typeHandlerVersion": "1.0", "autoUpgradeMinorVersion": true, "settings": { } } }, { "type": "Microsoft.Compute/virtualMachines/extensions", "name": "[concat(parameters('newVmName'),'/initdevbox')]", "apiVersion": "2015-06-15", "location": "[resourceGroup().location]", "dependsOn": [ "[concat('Microsoft.Compute/virtualMachines/', parameters('newVmName'))]" ], "properties": { "publisher": "Microsoft.Azure.Extensions", "type": "CustomScript", "typeHandlerVersion": "2.0", "autoUpgradeMinorVersion": true, "settings": { "fileUris": ["https://raw.githubusercontent.com/Azure/acr/main/docs/custom-domain/deprecated/docker-vm-deploy/deploy-nginx-docker.sh"] }, "protectedSettings": { "commandToExecute": "[concat('./deploy-nginx-docker.sh ', parameters('certThumbPrint'), ' ', parameters('backendRegistry'), ' ', parameters('dnsFrontEnd'), ' \"', parameters('caCertUrl'), '\"')]" } } } ] } ================================================ FILE: docs/custom-domain/deprecated/docker-vm-deploy/azuredeploy.parameters.json ================================================ { "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", "contentVersion": "1.0.0.0", "parameters": { "newVmName": { "value": "" }, "newStorageAccountName": { "value": "" }, "adminUsername": { "value": "" }, "adminPassword": { "value": "" }, "dnsNameForVM": { "value": "" }, "vaultName": { "value": "" }, "vaultResourceGroup": { "value": "" }, "secretUrlWithVersion": { "value": "" }, "certThumbPrint": { "value": "" }, "dnsFrontEnd": { "value": "" }, "backendRegistry": { "value": "" }, "caCertUrl": { "value": "" } } } ================================================ FILE: docs/custom-domain/deprecated/docker-vm-deploy/deploy-nginx-docker.sh ================================================ #!/bin/bash set -e 
CERT_FINGERPRINT=$1 export BACKEND_HOST=$2 export FRONTEND_HOST=$3 CA_CERT_URL=$4 SOURCE_ROOT="https://raw.githubusercontent.com/Azure/acr/main" curl "$SOURCE_ROOT/docs/custom-domain/deprecated/docker-vm-deploy/setup-certs.sh" -o setup-certs.sh chmod +x ./setup-certs.sh . ./setup-certs.sh $CERT_FINGERPRINT $CA_CERT_URL export CONTAINER_CERT_LOCATION="/etc/nginx/ssl/cert.crt" export CONTAINER_PRV_LOCATION="/etc/nginx/ssl/private.key" curl "$SOURCE_ROOT/docs/custom-domain/deprecated/docker-vm-deploy/docker-compose.yml.template" -o docker-compose.yml.template sudo -E envsubst '$CERT_LOCATION$PRV_LOCATION$CONTAINER_CERT_LOCATION$CONTAINER_PRV_LOCATION' < docker-compose.yml.template > docker-compose.yml export CERT_LOCATION=$CONTAINER_CERT_LOCATION export PRV_LOCATION=$CONTAINER_PRV_LOCATION curl "$SOURCE_ROOT/docs/custom-domain/deprecated/docker-vm-deploy/nginx.conf.template" -o nginx.conf.template sudo -E envsubst '$FRONTEND_HOST$BACKEND_HOST$CERT_LOCATION$PRV_LOCATION' < nginx.conf.template > nginx.conf ## Docker installation extension installs docker in the background ## So we cannot make assumption about its completion time until docker-compose up do sleep 10 done ================================================ FILE: docs/custom-domain/deprecated/docker-vm-deploy/deploy.ps1 ================================================ param ( $templateFile = 'azuredeploy.json', $templateParams = 'azuredeploy.parameters.json', [Parameter(Mandatory=$true)] [string] $resourceGroupName ) New-AzureRmResourceGroupDeployment -ResourceGroupName $resourceGroupName -TemplateFile $templateFile -TemplateParameterFile $templateParams ================================================ FILE: docs/custom-domain/deprecated/docker-vm-deploy/docker-compose.yml.template ================================================ proxy: image: nginx ports: - 443:443 volumes: - ${CERT_LOCATION}:${CONTAINER_CERT_LOCATION}:ro - ${PRV_LOCATION}:${CONTAINER_PRV_LOCATION}:ro - ./nginx.conf:/etc/nginx/nginx.conf:ro 
================================================ FILE: docs/custom-domain/deprecated/docker-vm-deploy/nginx.conf.template ================================================ error_log /var/log/nginx/error.log warn; pid /var/run/nginx.pid; events { worker_connections 1024; } http { include /etc/nginx/mime.types; default_type application/octet-stream; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /var/log/nginx/access.log; # main; sendfile on; #tcp_nopush on; keepalive_timeout 65; #gzip on; include /etc/nginx/conf.d/*.conf; upstream backends { server ${BACKEND_HOST}:443; } server { listen 443 ssl; # 'ssl' parameter tells NGINX to decrypt the traffic server_name ${FRONTEND_HOST}; ssl_certificate ${CERT_LOCATION}; # The certificate file ssl_certificate_key ${PRV_LOCATION}; # The private key file location / { client_max_body_size 1000G; proxy_set_header Host ${BACKEND_HOST}; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_pass https://backends; } } } ================================================ FILE: docs/custom-domain/deprecated/docker-vm-deploy/setup-certs.sh ================================================ #!/bin/bash CERT_FINGERPRINT=$1 CA_CERT_URL=$2 CERT_FINGERPRINT=`echo $CERT_FINGERPRINT | tr [a-z] [A-Z]` if [ ! 
-z "$CA_CERT_URL" ]; then curl $CA_CERT_URL -o ca_cert.crt set +e certDetails=`openssl x509 -in ca_cert.crt -text -noout` set -e # if it is not PEM, it must be DER if [ -z "$certDetails" ]; then openssl x509 -in ca_cert.crt -inform der -outform pem -out ca_cert_pem.crt else mv ca_cert.crt ca_cert_pem.crt fi sudo cat "/var/lib/waagent/$CERT_FINGERPRINT.crt" ca_cert_pem.crt > cert.crt export CERT_LOCATION=`pwd`/cert.crt else export CERT_LOCATION=/var/lib/waagent/${CERT_FINGERPRINT}.crt fi export PRV_LOCATION=/var/lib/waagent/${CERT_FINGERPRINT}.prv ================================================ FILE: docs/custom-domain/deprecated/key-vault-setup/ensure-vault.ps1 ================================================ param ( $subscriptionName, $resourceGroupName, $vaultName ) if ($subscriptionName) { Select-AzureRmSubscription -SubscriptionName $subscriptionName } Get-AzureRmKeyVault -vaultName $vaultName -ev notPresent -ea 0 if ($notPresent) { New-AzureRmKeyVault -VaultName $vaultName -ResourceGroupName $resourceGroupName -sku standard -EnabledForDeployment } ================================================ FILE: docs/custom-domain/deprecated/key-vault-setup/upload-cert.ps1 ================================================ param ( [Parameter(Mandatory=$true)] [string] $pfxFilePath, [Parameter(Mandatory=$true)] [string] $pfxPwFile, [Parameter(Mandatory=$true)] [string] $secretName, [Parameter(Mandatory=$true)] [string] $vaultName ) $pfxPw = [IO.File]::ReadAllText($pfxPwFile) $pfxContent = get-content $pfxFilePath -Encoding Byte $pfxContentEncoded = [System.Convert]::ToBase64String($pfxContent) $certBundleObj = @" { "data": "$pfxContentEncoded", "dataType" :"pfx", "password": "$pfxPw" } "@ $bundleObjBytes = [System.Text.Encoding]::UTF8.GetBytes($certBundleObj) $bundleObjEncoded = [System.Convert]::ToBase64String($bundleObjBytes) $secretValue = ConvertTo-SecureString -String $bundleObjEncoded -AsPlainText -Force Set-AzureKeyVaultSecret -Name $secretName -SecretValue 
$secretValue -VaultName $vaultName ================================================ FILE: docs/custom-domain/deprecated/registry-setup-deprecated.md ================================================ # How to use a custom domain for azure container registry Azure Container registries have a typical login URL of the format `*.azurecr.io`. A customer might like to have a custom domain that is associated with its own organization. The following is the guide on how to achieve that. ## Prerequisites For this example, we suppose that you want to associate `registry.contoso.com` with an Azure Container Registry. You would need the following: * Setup your organization's DNS zone `.contoso.com`. To create one on Azure, you can follow [this guide](https://docs.microsoft.com/en-us/azure/dns/dns-getstarted-create-dnszone-portal) * SSL certificate for `registry.contoso.com`, we would call it `contoso.pfx`. Put the password of the certificate to a file named `pwd.txt`. You would optionally also need your signing CA certificate's URL, such as `http://www.contoso.com/pki/ca.cert` * An instance of Azure Container Registry service as the backend. In this example we would assume it's `docker-registry-contoso.azurecr.io` ## Steps ### Upload your cert into Azure Key Vault Under [key-vault-setup/](key-vault-setup/), run the following: 1. (Optional) Create an Azure Key Vault, if you don't already have one: `.\ensure-vault.ps1 -subscriptionName -resourceGroupName -vaultName ` 2. Upload `contoso.pfx` to Azure Key Vault: `.\upload-cert.ps1 -pfxFilePath -pfxPwFile -secretName -vaultName ` ### Deploy and configure an Nginx Docker image on a new Azure VM Deploy via Azure Portal Alternatively, to deploy using powershell script, [docker-vm-deploy/](docker-vm-deploy/), do the following: 1. Edit [azuredeploy.parameters.json](docker-vm-deploy/azuredeploy.parameters.json) and populate all necessary parameters 2. 
Run the following script to create the new VM: `.\deploy.ps1 -resourceGroupName ` ### Configure DNS zone Configure the DNS zone so `registry.contoso.com` points to the Azure VM you have just created. If you are using an Azure DNS Zone. You can use the following command: `New-AzureRmDnsRecordSet -Name -RecordType CNAME -ZoneName -ResourceGroupName -Ttl -DnsRecords (New-AzureRmDnsRecordConfig -Cname )` ## Quick verification A simple way to test the setup is to call `docker login` to quickly confirm that the requests are properly forwarded: `docker login -u -p registry.contoso.com` ================================================ FILE: docs/deploy.sh ================================================ #!/usr/bin/env sh # abort on errors set -e npm install -g vuepress # build npm run docs:build # navigate into the build output directory cd ./gh-pages # if you are deploying to a custom domain # echo 'www.example.com' > CNAME git init && \ git config --global user.email @users.noreply.github.com && \ git config --global user.name "Git Hub Deploy Action" && \ git add . git commit -m 'deploy' git push -f git@github.com:${GH_REPOSITORY}.git master:gh-pages ================================================ FILE: docs/http-headers.md ================================================ # Azure Container Registry HTTP headers Azure container registries are compatible with a multitude of services and orchestrators. To help our customers, we'd like to understand which services in Azure, or outside of Azure, are issuing registry requests. To track the source services and agents from which ACR is used, we have started using the `HttpHeaders` field in the Docker `config.json` file. ## Header format ACR will parse headers using the following format: ```HTTP X-Meta-Source-Client: // ``` * `cloud`: Azure, Azure Stack, or other government- or country-specific Azure cloud. * `service`: The name of the service. 
* `optionalservicename`: An optional parameter for services with subservices, or for specifying a SKU. For example, Web Apps corresponds to `azure/app-service/web-apps`. The servicename can also be a hierarchy path, for example `azure/acr/connected-registry/instance-1`. ### Example ```JSON { "HttpHeaders": { "X-Meta-Source-Client": "azure/aks" }, "auths": { "myregistry.azurecr.io": {}, }, "credsStore": "wincred" } ``` ## Header values Partner services and orchestrators are encouraged to use specific header values to help with our telemetry. Users can also modify the value passed to the header if they so desire. The values we ask ACR partners to use when populating the `X-Meta-Source-Client` field are: | Cloud | Header | | ------------------ | ------------- | | Azure Public Cloud | `azure/` | | Azure Stack | `azurestack/` | | China (Mooncake) | `china/` | | Germany | `germany/` | | US DOD | `azureusdod/` | | US Gov | `azureusgov/` | | On Premise | `on-prem/` | | Service or Orchestrator name | Header | | ------------------------------ | ----------------------------------------- | | App Service - Logic Apps | `azure/app-service/logic-apps` | | App Service - Web Apps | `azure/app-service/web-apps` | | Azure Container Builder | `azure/acb` | | Azure Container Instance | `azure/aci` | | Azure Container Service | `azure/acs` | | Azure Kubernetes Service | `azure/aks` | | AKS Engine (Kubernetes) | `azure/aks-engine` | | Cluster API Azure (Kubernetes) | `azure/capz` | | Batch | `azure/batch` | | Cloud Console | `azure/cloud-console` | | Functions | `azure/functions` | | HDInsight | `azure/hdinsight` | | Internet of Things - Hub | `azure/iot/hub` | | Jenkins | `azure/jenkins` | | Machine Learning | `azure/ml` | | Service Fabric | `azure/service-fabric` | | VSTS | `azure/vsts` | | ACR Tasks | `azure/acr/tasks` | | ACR Connected Registry | `azure/acr/connected-registry/instance-1` | ================================================ FILE: docs/image-signing.md 
================================================ # Azure Container Registry Image Signing Azure Container Registry supports image signing through [Docker Content Trust](https://docs.docker.com/notary/getting_started/). To push signed images to ACR, the following configuration is required: * The user or Service Principal used for automated signing must be assigned the `AcrImageSigner` role to your registry in addition to the `Owner`, `Contributor` roles for signing. Role assignment can be done by the following methods. * Azure Portal: Your registry -> Access Control (IAM) -> Add (Select `AcrImageSigner` for the Role). * Azure CLI: Find the resource id `id` of the registry by running ``` az acr show -n myRegistry ``` Then you can assign the `AcrImageSigner` role to a user ``` az role assignment create --scope resource_id --role AcrImageSigner --assignee user@example.com ``` or a service principle identified by its application ID ``` az role assignment create --scope resource_id --role AcrImageSigner --assignee 00000000-0000-0000-0000-000000000000 ``` * To pull trusted images, a `Reader` role is enough for normal users. No additional roles like an `AcrImageSigner` role are required. You can use Docker Client and Notary Client to interact trusted images with ACR. Detailed documentation can be found at [Content trust in Docker](https://docs.docker.com/engine/security/trust/content_trust/). ================================================ FILE: docs/image-transfer/ExportPipelines/azuredeploy.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "location": { "type": "string", "defaultValue": "[resourceGroup().location]", "metadata": { "description": "Location for all resources." 
} }, "registryName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Azure Container Registry" } }, "exportPipelineName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your export pipeline." } }, "userAssignedIdentity": { "type": "string", "metadata": { "description": "The user assigned identity to be bound to the task run." }, "defaultValue": "" }, "targetUri": { "type": "string", "metadata": { "description": "The target URI of the export pipeline." } }, "keyVaultName": { "type": "string", "metadata": { "description": "The key vault name to obtain the target storage SAS token." } }, "sasTokenSecretName": { "type": "string", "metadata": { "description": "The key vault secret name to obtain the target storage SAS token." } }, "options": { "type": "array", "metadata": { "description": "The list of all options configured for the pipeline." }, "defaultValue": [] }, "storageAccessMode": { "type": "string", "defaultValue": "SasToken", "allowedValues": [ "SasToken", "ManagedIdentity" ], "metadata": { "description": "The storage access mode for the export pipeline. Use 'SasToken' to authenticate via a SAS token stored in Key Vault, or 'ManagedIdentity' to authenticate directly using an Entra managed identity." 
} } }, "variables": { "targetType": "AzureStorageBlobContainer", "systemIdentity": { "type": "SystemAssigned" }, "userIdentity": { "type": "UserAssigned", "userAssignedIdentities": { "[parameters('userAssignedIdentity')]": {} } }, "keyVaultSecretsPermissions": [ "get" ] }, "resources": [ { "type": "Microsoft.ContainerRegistry/registries/exportPipelines/", "name": "[concat(parameters('registryName'), '/', parameters('exportPipelineName'))]", "location": "[parameters('location')]", "apiVersion": "2025-06-01-preview", "identity": "[if(not(empty(parameters('userAssignedIdentity'))), variables('userIdentity'), variables('systemIdentity'))]", "properties": { "target": { "type": "[variables('targetType')]", "uri": "[parameters('targetUri')]", "keyVaultUri": "[concat(reference(resourceId('Microsoft.KeyVault/vaults', parameters('keyVaultName')), '2023-07-01').vaultUri, 'secrets/', parameters('sasTokenSecretName'))]", "storageAccessMode": "[parameters('storageAccessMode')]" }, "options": "[parameters('options')]" } }, { "condition": "[equals(parameters('storageAccessMode'), 'SasToken')]", "type": "Microsoft.KeyVault/vaults/accessPolicies", "name": "[concat(parameters('keyVaultName'), '/add')]", "apiVersion": "2023-07-01", "dependsOn": [ "[resourceId('Microsoft.ContainerRegistry/registries/exportPipelines', parameters('registryName'), parameters('exportPipelineName'))]" ], "properties": { "accessPolicies": [ { "tenantId": "[subscription().tenantId]", "objectId": "[if(not(empty(parameters('userAssignedIdentity'))), reference(parameters('userAssignedIdentity'), '2023-01-31').principalId, reference(resourceId('Microsoft.ContainerRegistry/registries/exportPipelines', parameters('registryName'), parameters('exportPipelineName')), '2025-06-01-preview', 'Full').identity.principalId)]", "permissions": { "secrets": "[variables('keyVaultSecretsPermissions')]" } } ] } } ] } ================================================ FILE: 
docs/image-transfer/ExportPipelines/azuredeploy.parameters.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", "contentVersion": "1.0.0.0", "parameters": { "registryName": { "value": "myregistry" }, "exportPipelineName": { "value": "myExportPipeline" }, "targetUri": { "value": "https://accountname.blob.core.windows.net/containername" }, "keyVaultName": { "value": "myvault" }, "sasTokenSecretName": { "value": "acrexportsas" }, "options": { "value": [ "OverwriteBlobs", "ContinueOnErrors" ] }, "storageAccessMode": { "value": "SasToken" } } } ================================================ FILE: docs/image-transfer/ImportPipelines/azuredeploy.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "location": { "type": "string", "defaultValue": "[resourceGroup().location]", "metadata": { "description": "Location for all resources." } }, "registryName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Azure Container Registry" } }, "importPipelineName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your import pipeline." } }, "userAssignedIdentity": { "type": "string", "metadata": { "description": "The user assigned identity to be bound to the task run." }, "defaultValue": "" }, "sourceUri": { "type": "string", "metadata": { "description": "The source URI of the import pipeline." } }, "keyVaultName": { "type": "string", "metadata": { "description": "The key vault name to obtain the source storage SAS token." } }, "sasTokenSecretName": { "type": "string", "metadata": { "description": "The key vault secret name to obtain the source storage SAS token."
} }, "sourceTriggerStatus": { "type": "string", "defaultValue": "Enabled", "metadata": { "description": "Indicates whether you want to enable the source trigger on the import pipeline." }, "allowedValues": [ "Enabled", "Disabled" ] }, "options": { "type": "array", "metadata": { "description": "The list of all options configured for the pipeline." }, "defaultValue": [] }, "storageAccessMode": { "type": "string", "defaultValue": "SasToken", "allowedValues": [ "SasToken", "ManagedIdentity" ], "metadata": { "description": "The storage access mode for the import pipeline. Use 'SasToken' to authenticate via a SAS token stored in Key Vault, or 'ManagedIdentity' to authenticate directly using an Entra managed identity." } } }, "variables": { "sourceType": "AzureStorageBlobContainer", "systemIdentity": { "type": "SystemAssigned" }, "userIdentity": { "type": "UserAssigned", "userAssignedIdentities": { "[parameters('userAssignedIdentity')]": {} } }, "keyVaultSecretsPermissions": [ "get" ] }, "resources": [ { "type": "Microsoft.ContainerRegistry/registries/importPipelines/", "name": "[concat(parameters('registryName'), '/', parameters('importPipelineName'))]", "location": "[parameters('location')]", "apiVersion": "2025-06-01-preview", "identity": "[if(not(empty(parameters('userAssignedIdentity'))), variables('userIdentity'), variables('systemIdentity'))]", "properties": { "source": { "type": "[variables('sourceType')]", "uri": "[parameters('sourceUri')]", "keyVaultUri": "[concat(reference(resourceId('Microsoft.KeyVault/vaults', parameters('keyVaultName')), '2023-07-01').vaultUri, 'secrets/', parameters('sasTokenSecretName'))]", "storageAccessMode": "[parameters('storageAccessMode')]" }, "trigger": { "sourceTrigger": { "status": "[parameters('sourceTriggerStatus')]" } }, "options": "[parameters('options')]" } }, { "condition": "[equals(parameters('storageAccessMode'), 'SasToken')]", "type": "Microsoft.KeyVault/vaults/accessPolicies", "name": "[concat(parameters('keyVaultName'), 
'/add')]", "apiVersion": "2023-07-01", "dependsOn": [ "[resourceId('Microsoft.ContainerRegistry/registries/importPipelines', parameters('registryName'), parameters('importPipelineName'))]" ], "properties": { "accessPolicies": [ { "tenantId": "[subscription().tenantId]", "objectId": "[if(not(empty(parameters('userAssignedIdentity'))), reference(parameters('userAssignedIdentity'), '2023-01-31').principalId, reference(resourceId('Microsoft.ContainerRegistry/registries/importPipelines', parameters('registryName'), parameters('importPipelineName')), '2025-06-01-preview', 'Full').identity.principalId)]", "permissions": { "secrets": "[variables('keyVaultSecretsPermissions')]" } } ] } } ] } ================================================ FILE: docs/image-transfer/ImportPipelines/azuredeploy.parameters.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", "contentVersion": "1.0.0.0", "parameters": { "registryName": { "value": "myregistry" }, "importPipelineName": { "value": "myImportPipeline" }, "sourceUri": { "value": "https://accountname.blob.core.windows.net/containername" }, "keyVaultName": { "value": "myvault" }, "sasTokenSecretName": { "value": "acrimportsas" }, "options": { "value": [ "OverwriteTags", "DeleteSourceBlobOnSuccess", "ContinueOnErrors" ] }, "storageAccessMode": { "value": "SasToken" } } } ================================================ FILE: docs/image-transfer/PipelineRun/PipelineRun-Export/azuredeploy.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "location": { "type": "string", "defaultValue": "[resourceGroup().location]", "metadata": { "description": "Location for all resources." 
} }, "registryName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Azure Container Registry" } }, "pipelineRunName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your pipeline run." } }, "pipelineResourceId": { "type": "string", "metadata": { "description": "The resource ID of the pipeline to run." } }, "artifacts": { "type": "array", "metadata": { "description": "List of source artifacts to be transferred by the pipeline." }, "defaultValue": [] }, "sourceName": { "type": "string", "metadata": { "description": "Name of the existing blob for exported artifacts in your storage account, such as myblob." }, "defaultValue": "" }, "targetName": { "type": "string", "metadata": { "description": "Name you choose for the artifacts blob exported to your source storage account, such as myblob." }, "defaultValue": "" }, "catalogDigest": { "type": "string", "metadata": { "description": "The digest of the tar used to transfer the artifacts." }, "defaultValue": "" }, "forceUpdateTag": { "type": "string", "metadata": { "description": "How the pipeline run should be forced to recreate even if the pipeline run configuration has not changed." 
}, "defaultValue": "" } }, "variables": { "transferType": "AzureStorageBlob" }, "resources": [ { "type": "Microsoft.ContainerRegistry/registries/pipelineRuns/", "name": "[concat(parameters('registryName'), '/', parameters('pipelineRunName'))]", "location": "[parameters('location')]", "apiVersion": "2025-06-01-preview", "properties": { "request": { "pipelineResourceId": "[parameters('pipelineResourceId')]", "artifacts": "[parameters('artifacts')]", "source": { "type": "[if(not(empty(parameters('sourceName'))), variables('transferType'), '')]", "name": "[parameters('sourceName')]" }, "target": { "type": "[if(not(empty(parameters('targetName'))), variables('transferType'), '')]", "name": "[parameters('targetName')]" }, "catalogDigest": "[parameters('catalogDigest')]" }, "forceUpdateTag": "[parameters('forceUpdateTag')]" } } ] } ================================================ FILE: docs/image-transfer/PipelineRun/PipelineRun-Export/azuredeploy.parameters.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", "contentVersion": "1.0.0.0", "parameters": { "registryName": { "value": "myregistry" }, "pipelineRunName": { "value": "myPipelineRunExport" }, "pipelineResourceId": { "value": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registries/myRegistry/exportPipelines/myExportPipeline" }, "targetName": { "value": "myblob" }, "artifacts": { "value": [ "hello-world:latest", "sourceRepository@sha256:0000000000000000000000000000000000000000000000000000000000000000" ] } } } ================================================ FILE: docs/image-transfer/PipelineRun/PipelineRun-Import/azuredeploy.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "location": { "type": 
"string", "defaultValue": "[resourceGroup().location]", "metadata": { "description": "Location for all resources." } }, "registryName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Azure Container Registry" } }, "pipelineRunName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your pipeline run." } }, "pipelineResourceId": { "type": "string", "metadata": { "description": "The resource ID of the pipeline to run." } }, "artifacts": { "type": "array", "metadata": { "description": "List of source artifacts to be transferred by the pipeline." }, "defaultValue": [] }, "sourceName": { "type": "string", "metadata": { "description": "Name of the existing blob for exported artifacts in your storage account, such as myblob." }, "defaultValue": "" }, "targetName": { "type": "string", "metadata": { "description": "Name you choose for the artifacts blob exported to your source storage account, such as myblob." }, "defaultValue": "" }, "catalogDigest": { "type": "string", "metadata": { "description": "The digest of the tar used to transfer the artifacts." }, "defaultValue": "" }, "forceUpdateTag": { "type": "string", "metadata": { "description": "How the pipeline run should be forced to recreate even if the pipeline run configuration has not changed." 
}, "defaultValue": "" } }, "variables": { "transferType": "AzureStorageBlob" }, "resources": [ { "type": "Microsoft.ContainerRegistry/registries/pipelineRuns/", "name": "[concat(parameters('registryName'), '/', parameters('pipelineRunName'))]", "location": "[parameters('location')]", "apiVersion": "2025-06-01-preview", "properties": { "request": { "pipelineResourceId": "[parameters('pipelineResourceId')]", "artifacts": "[parameters('artifacts')]", "source": { "type": "[if(not(empty(parameters('sourceName'))), variables('transferType'), '')]", "name": "[parameters('sourceName')]" }, "target": { "type": "[if(not(empty(parameters('targetName'))), variables('transferType'), '')]", "name": "[parameters('targetName')]" }, "catalogDigest": "[parameters('catalogDigest')]" }, "forceUpdateTag": "[parameters('forceUpdateTag')]" } } ] } ================================================ FILE: docs/image-transfer/PipelineRun/PipelineRun-Import/azuredeploy.parameters.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", "contentVersion": "1.0.0.0", "parameters": { "registryName": { "value": "myregistry" }, "pipelineRunName": { "value": "myPipelineRunImport" }, "pipelineResourceId": { "value": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registries/myRegistry/importPipelines/myImportPipeline" }, "sourceName": { "value": "myblob" } } } ================================================ FILE: docs/image-transfer/README.md ================================================ # ACR Transfer - Sample ARM Templates This directory contains Azure Resource Manager (ARM) templates for ACR Transfer, a feature for transferring container images and OCI artifacts between Azure Container Registries in disconnected environments. 
## Templates | Directory | Description | |-----------|-------------| | [ExportPipelines](./ExportPipelines) | Creates an ExportPipeline resource for exporting artifacts from a container registry to a storage account blob container. | | [ImportPipelines](./ImportPipelines) | Creates an ImportPipeline resource for importing artifacts from a storage account blob container into a container registry. | | [PipelineRun/PipelineRun-Export](./PipelineRun/PipelineRun-Export) | Creates a PipelineRun resource to trigger an export pipeline. | | [PipelineRun/PipelineRun-Import](./PipelineRun/PipelineRun-Import) | Creates a PipelineRun resource to trigger an import pipeline. | ## Documentation For complete documentation including prerequisites, setup instructions, and usage examples, see: **[ACR Transfer Documentation](https://aka.ms/acr/transfer)** The documentation covers: - What is ACR Transfer and how it works - Storage access modes (SAS Token vs Managed Identity) - Prerequisites and setup - Step-by-step guides for Azure CLI and ARM templates - Troubleshooting ## Quick Start These templates require: - Azure Container Registry Premium tier - Storage accounts with blob containers - API version `2025-06-01-preview` or later - For detailed prerequisites and instructions, refer to the [official documentation](https://aka.ms/acr/transfer) ================================================ FILE: docs/integration/CircleCI.md ================================================ # Using Azure Container Registry With CircleCI For configuration of your Docker build using CircleCI, refer [https://circleci.com/docs/1.0/docker/](https://circleci.com/docs/1.0/docker/) Here is a sample `circle.yml` file that can be used with Azure Container Registry using three environment variables as a part of the build, that builds and pushes an image to the registry. ``` yml machine: services: - docker dependencies: override: - docker info - docker build --rm=false -t $REGISTRY_HOST/circleci . 
test: override: - docker run -d hello-world deployment: hub: branch: master commands: - docker login -e $DOCKER_USER -u $DOCKER_USER -p $DOCKER_PASSWORD $REGISTRY_HOST - docker push $REGISTRY_HOST/circleci ``` | Environment Variable | Description | | --------------------|-------------| | REGISTRY_HOST | Login server host for your Registry | | DOCKER_USER | Service principal or admin user for the registry | | DOCKER_PASSWORD | User's password that would be used for docker login | ================================================ FILE: docs/integration/change-analysis/README.md ================================================ --- type: post title: "Change Analysis" tags: [developers, teleport] date: 2019-11-13 17:00:00 author: Sajay Antony --- # Change Analysis with ACR You can enable change analysis service on your subscription in the [Azure Portal](https://docs.microsoft.com/en-us/azure/azure-monitor/app/change-analysis) or using the following command ```sh az provider register -n 'Microsoft.ChangeAnalysis' ``` Once this has been enabled you can view changes on your registry. For e.g. you can see that the `adminUserEnabled` boolean has been changed on the registry. ![](./change-analysis-screenshot.png) ================================================ FILE: docs/integration/github-actions/Dockerfile ================================================ FROM hello-world ================================================ FILE: docs/integration/github-actions/github-actions.md ================================================ # Using Azure Container Registry With GitHub Actions For creating workflows for your GitHub repository using GitHub Actions, please refer [https://developer.github.com/actions/](https://developer.github.com/actions/). The following `main.workflow` file defines a workflow that uses the built-in Docker Actions to login to the Azure Container Registry, build and push an image to the registry. 
You also need to define three secrets to pass the registry access information to the Actions. | Secret/Environment Variable | Description | | --------------------|---------------------------------------------------------------| | DOCKER_REGISTRY_URL | Login server url for the registry, eg, myregistry.azurecr.io | | DOCKER_USERNAME | Service principal App ID or admin username for the registry | | DOCKER_PASSWORD | Service principal password or admin password for the registry | main.workflow --- ``` workflow "DockerFlowExample" { resolves = ["Docker Push"] on = "push" } action "Docker Login" { uses = "actions/docker/login@8cdf801b322af5f369e00d85e9cf3a7122f49108" secrets = ["DOCKER_REGISTRY_URL", "DOCKER_USERNAME", "DOCKER_PASSWORD"] } action "Docker Build" { uses = "actions/docker/cli@8cdf801b322af5f369e00d85e9cf3a7122f49108" needs = ["Docker Login"] args = ["build", "-t", "$DOCKER_REGISTRY_URL/hello-world:latest", "docs/integration/github-actions"] secrets = ["DOCKER_REGISTRY_URL"] } action "Docker Push" { uses = "actions/docker/cli@8cdf801b322af5f369e00d85e9cf3a7122f49108" needs = ["Docker Build"] args = ["push", "$DOCKER_REGISTRY_URL/hello-world:latest"] secrets = ["DOCKER_REGISTRY_URL"] } ``` ================================================ FILE: docs/integration/github-actions/main.workflow ================================================ workflow "DockerFlowExample" { resolves = ["Docker Push"] on = "push" } action "Docker Login" { uses = "actions/docker/login@8cdf801b322af5f369e00d85e9cf3a7122f49108" secrets = ["DOCKER_REGISTRY_URL", "DOCKER_USERNAME", "DOCKER_PASSWORD"] } action "Docker Build" { uses = "actions/docker/cli@8cdf801b322af5f369e00d85e9cf3a7122f49108" needs = ["Docker Login"] args = ["build", "-t", "$DOCKER_REGISTRY_URL/hello-world:latest", "docs/integration/github-actions"] secrets = ["DOCKER_REGISTRY_URL"] } action "Docker Push" { uses = "actions/docker/cli@8cdf801b322af5f369e00d85e9cf3a7122f49108" needs = ["Docker Build"] args = ["push", 
"$DOCKER_REGISTRY_URL/hello-world:latest"] secrets = ["DOCKER_REGISTRY_URL"] } ================================================ FILE: docs/move-repositories-to-new-registry/README.md ================================================ # How to move your repositories to a new registry? When users create a container registry backed by a storage account, the repositories are pushed under a blob container that is named after the registry within that storage account. In the example below we have two registries in a resource group associated with the same storage account. ![alt Registries](move-repositories-to-new-registry-sourceregistry.png) Here using [Azure Storage Explorer](http://storageexplorer.com/) we can see that each registry gets a container with the corresponding registry name. ![alt Registries container data](move-repositories-to-new-registry-sourceregistry-data.png) All you need to do is move the blobs from one container to the other if you want to copy over the repositories. If you do not care about the old container registry then you can just rename the blob container and delete the registry since deleting a registry does not delete the associated data in your storage account. ![alt Copy blobs](move-repositories-to-new-registry-sourceregistry-copy.png) > Make sure you paste that into the target registry's blob container and you should be able to pull your images from the new registry. ================================================ FILE: docs/package.json ================================================ { "scripts": { "docs:dev": "vuepress dev .", "docs:build": "vuepress build ." 
} } ================================================ FILE: docs/preview/abac-repo-permissions/README.md ================================================ # Microsoft Entra attribute-based access control (ABAC) for repository permissions (Preview) ACR ABAC for Microsoft Entra-based repository permissions is currently in public preview (Portal as of May 9th, 2025, Azure CLI as of May 19th, 2025). Documentation is available at [https://aka.ms/acr/auth/abac](https://aka.ms/acr/auth/abac). ================================================ FILE: docs/preview/artifact-streaming/README.md ================================================ > [!NOTE] > This feature is available as a public preview. ## About Azure Container Registry (ACR) and Azure Kubernetes Service (AKS) are proud to announce the public preview for artifact streaming. Artifact streaming for AKS provides customers the ability to accelerate containerized workloads in the cloud by dramatically reducing the overall startup time. Artifact streaming will empower customers to scale resources on AKS seamlessly by not having to wait for long pull times for each Kubernetes pod. Customers with Linux amd64 container images are supported during this public preview, and we have plans to support Windows and arm64 container images in the future. We can’t wait to hear what our customers think and look forward to hearing feedback on further improving this feature. Get started today at [aka.ms/acr/artifact-streaming](https://aka.ms/acr/artifact-streaming). ================================================ FILE: docs/preview/connected-registry/README.md ================================================ # ACR connected registry (Private Preview) instructions This article provides guidance for use of the connected registry feature of Azure Container Registry (ACR) during the limited preview. 
To request preview access, submit your contact details using this [form](https://forms.office.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR1OsLxas9SdIhfyFenqqkolUMkFKMTdDSU45SFQzU0o0WUNROVAySkRINy4u) and we will get in touch with you. ## Available Regions During limited preview period, the connected registry functionality is available in dedicated stamps in the following Azure regions: - Asia East - EU West - US East To use the connected registry functionality, your ACR must be deployed in one of the above three regions and in a supported deployment stamp. To check the stamp where your ACR is deployed to, use the following command: ```azurecli nslookup <your-registry-name>.azurecr.io ``` The stamp name is one of the aliases returned by the above command. Currently, connected registries are supported in the following stamps: - East Asia: `ea-1.fe.azcr.io` - EU West: `weu-3.fe.azcr.io` - East US: `eus-2.fe.azcr.io` > **IMPORTANT** > If your ACR doesn't have the above alias respective to your region, the connected registry functionality will not be available. You can create an issue as described below, and we will migrate your registry to the correct stamp. ## Known Limitations Here is a list of known limitations for the connected registry functionality in limited preview: - Number of tokens and scopemaps is limited to 20K for a single ACR. This indirectly limits the number of connected registries for an ACR registry because every connected registry needs a sync and client token. - Number of repository permissions in a scope map is limited to 500. - Number of clients for the connected registry is currently limited to 20. - Image locking through repository/manifest/tag metadata is not currently supported for connected registries. - Repository delete is not supported on the connected registry using registry mode. - Audit logs for connected registries are currently not supported. - Garbage collection of deleted artifacts on connected registries is currently not supported. 
- Connected registry is coupled with home region data endpoint and its automatic migration for geo replications is not supported. - Deletion of a connected registry needs manual removal of the containers on premises as well as removal of the respective scope map or tokens in the cloud. - Connected registry sync limitations are as follows: - For continuous sync: - `minMessageTtl` is 1 day - `maxMessageTtl` is 90 days - For occasionally connected scenarios, where you want to specify sync window: - `minSyncWindow` is 1 hr - `maxSyncWindow` is 7 days ## Set Up and Configuration In limited preview, the connected registry targets IoT scenarios. Below are links to the preliminary documentation you can use to set up and configure the connected registry with your IoT Edge infrastructure. - [Overview of connected registry](./intro-connected-registry.md) - [Understand access to a connected registry](./overview-connected-registry-access.md) - [Using connected registry with Azure IoT Edge](./overview-connected-registry-and-iot-edge.md) - [Quickstart: Create a connected registry using Azure Container Registry CLI commands](./quickstart-connected-registry-cli.md) - [Quickstart: Deploy a connected registry to an IoT Edge device](./quickstart-deploy-connected-registry-iot-edge-cli.md) - [Quickstart: Deploy a connected registry to a nested IoT Edge device](./quickstart-deploy-connected-registry-nested-iot-edge-cli.md) - [Quickstart: Pull images from a connected registry](./quickstart-pull-images-from-connected-registry.md) - [Quickstart: View connected registry repositories and tags](./quickstart-view-connected-registry-repos-and-tags.md) ## Release Notes Reference [Release Notes](./release-notes.md) for information on the changes included in each release of the connected registry runtime. ## Troubleshooting We keep a list of troubleshooting steps for known issues. Those are available on the [Troubleshooting](./troubleshooting.md) page. 
## Reporting Issues and Asking for Help To report issues, [create a new bug](https://github.com/Azure/acr/issues/new?assignees=toddysm&labels=connected-registry,bug&template=bug_report.md&title=) in this repository. If you need help with installation, set up, or use, you can [submit a help request](https://github.com/Azure/acr/issues/new?assignees=toddysm&labels=help%20wanted&template=bug_report.md&title=) in this repository. ================================================ FILE: docs/preview/connected-registry/connected-registry-error-codes.md ================================================ --- title: Connected registry error code reference description: Details about error codes shown in the statusDetails property of a connected registry resource. For each error, possible solutions are listed. ms.topic: troubleshooting ms.date: 09/29/2021 ms.author: jeburke author: jaysterp --- # Connected Registry Error Code Reference This article helps you troubleshoot error codes you might encounter in the `StatusDetails` property of a connected registry. ## Connection State The connection state of a connected registry indicates the current overall health status of the deployed connected registry instance. The possible connection states are defined as follows: | Connection State | Description | |--------------|-----------| | Online | The connected registry instance is currently connected with the cloud and in a healthy state. | | Offline | The connected registry instance is currently disconnected from the cloud. | | Unhealthy | The connected registry instance is currently connected with the cloud, but it is reporting critical errors. Reference the `StatusDetails` property to view the corresponding errors. | Use the [az acr connected-registry show][az-acr-connected-registry-show] command to view the current connection state of your connected registry. 
```azurecli az acr connected-registry show \ --registry MyRegistry \ --name MyConnectedRegistry \ --output table ``` You should see a response as follows. Note that this connected registry has a connection state of `Unhealthy`. ``` NAME MODE CONNECTION STATE PARENT LOGIN SERVER LAST SYNC (UTC) SYNC SCHEDULE SYNC WINDOW --------------------- -------- ------------------ -------- -------------- ------------------------- --------------- ------------- MyConnectedRegistry ReadOnly Unhealthy 2021-09-29T12:59:00+00:00 * * * * * ``` ## Status Details Format When your connected registry has a connection state of `Unhealthy` you can run the [az acr connected-registry show][az-acr-connected-registry-show] command to view the list of status details. ```azurecli az acr connected-registry show \ --registry MyRegistry \ --name MyConnectedRegistry --query statusDetails ``` The `StatusDetails` property provides a list of error objects, each with the following format: ```json { "code": "Error code", "correlationId": "CorrelationId of the error on the on-premises connected registry instance", "description": "Description corresponding to this error", "timestamp": "Timestamp corresponding to this error", "type": "Component of the connected registry instance corresponding to the error" } ``` Every time the connected registry instance syncs with the cloud, these status details are updated. When the connected registry no longer has status details listed, it is considered healthy and its connection state is transitioned from `Unhealthy` to `Online`. Once the connected registry is no longer connected to internet, its connection state will transition to `Offline`. # Error Codes This section lists the possible codes you may see in the `StatusDetails` property of a connected registry, which indicate critical errors. For each error, possible solutions are listed. 
You can view the status details of your connected registry by running the [az acr connected-registry show][az-acr-connected-registry-show] command. ```azurecli az acr connected-registry show \ --registry MyRegistry \ --name MyConnectedRegistry --query statusDetails ``` ## DiskError This is the default error code when the connected registry is unable to create, write, or delete a file on the local disk. There are a few scenarios that may cause a `DiskError` code. Please reference below for possible scenarios and remediations. ### Insufficient Permissions Sample status detail: ```json [ { "code": "DiskError", "correlationId": "73a46395-b89b-49c7-5621-d54e8b1574b5", "description": "Access to the path '/var/acr/data/registry/dummy.txt' is denied.", "timestamp": "2021-09-16T01:17:45.394512+00:00", "type": "Disk" } ] ``` This status `description` indicates that the connected registry was unable to write any file to the disk because it did not have sufficient permissions. *Potential solution:* Ensure that the host storage path used to run the connected registry container gives sufficient access to the container user. In the sample above, this path is `/var/acr/data/registry`. Update the permissions of the host system directory so that the user profile for your container has read, write, and execute access. By default, docker containers run as root. If the container is run as a non-root user, please ensure that user has the above permissions. ### No Disk Space Available on the Device Sample status detail: ```json [ { "code": "DiskError", "correlationId": "73a46395-b89b-49c7-5621-d54e8b1574b5", "description": "No space left on device.", "timestamp": "2021-09-16T01:17:45.394512+00:00", "type": "Disk" } ] ``` This status `description` indicates that the connected registry was unable to write any file to the disk because there was no space left on the device. *Potential solution:* Connected registry container logs are integrated with docker. 
By default, docker does not set container log size limits. Over time, these logs can take up much of your host's storage capacity. If your disk is out of space then you can place limits on the container logs retained by docker. See the following options for limiting storage space used by connected registry logs. #### Option 1: Place global log limits for all containers on the host If running the connected registry on docker, create or update the docker daemon file `/etc/docker/daemon.json` to add logging limits across all containers on this host. The following example sets the log driver to `json-file` and sets `max-size` and `max-file` properties in order to enable automatic log rotation. If the configured threshold is reached, docker will remove the oldest log file first in order to make space for new logs. ```json { "log-driver": "json-file", "log-opts": { "max-size": "10m", "max-file": "3" } } ``` Restart the container engine in order for the configuration to take effect. #### Option 2: Place log limits only for the connected registry container You can also update the log level of the connected registry container only. Add the following flags to your `docker run` command: `--log-driver json-file --log-opt max-size=10m --log-opt max-file=3` Please reference how to set module-level log restrictions when running your connected registry on [IoT Edge](https://docs.microsoft.com/en-us/azure/iot-edge/production-checklist?view=iotedge-2020-11#option-adjust-log-settings-for-each-container-module). #### Option 3: Update log verbosity on your connected registry After making the above docker configuration changes to free up disk space, you can also update the connected registry resource in order to limit logs sent to docker. By default, connected registries are created with `Information` log level. To minimize the verbosity of the logs stored, set the log level to `Warning`, `Error`, or `None`. 
Use the connected registry [az acr connected-registry update][az-acr-connected-registry-update] command to update the log level. `az acr connected-registry update -r MyRegistry -n MyConnectedRegistry --log-level Error` ```azurecli az acr connected-registry update \ --registry MyRegistry \ --name MyConnectedRegistry \ --log-level Error ``` The configuration will take effect on-premises during the next scheduled sync with the cloud. [az-acr-connected-registry-show]: https://docs.microsoft.com/cli/azure/acr/connected-registry?view=azure-cli-latest#az_acr_connected_registry_show [az-acr-connected-registry-update]: https://docs.microsoft.com/cli/azure/acr/connected-registry?view=azure-cli-latest#az_acr_connected_registry_update ================================================ FILE: docs/preview/connected-registry/intro-connected-registry.md ================================================ --- title: What is connected registry description: Overview of the connected registry feature of Azure container registry introducing the main concepts author: toddysm ms.author: memladen ms.service: container-registry ms.topic: overview ms.date: 01/11/2021 --- # What is connected registry? In this article, you learn about the connected registry feature of Azure container registry. A connected registry is an on-premise replica that synchronizes container images and other OCI artifacts with your cloud-based Azure container registry. Use a connected registry to help speed up access to registry artifacts on-premise and build advanced scenarios using [nested IoT Edge](https://docs.microsoft.com/azure/iot-edge/tutorial-nested-iot-edge), [Azure Stack](https://docs.microsoft.com/azure-stack), or [Azure Arc enabled Kubernetes](https://docs.microsoft.com/azure/azure-arc/kubernetes/overview). ## Scenarios A cloud-based Azure container registry provides features including geo-replication, integrated security, Azure-managed storage, and integration with Azure development and deployment pipelines. 
At the same time, customers are extending their cloud investments to their on-premises and field solutions. To run with the required performance and reliability in on-premises or remote environments, container workloads need container images and related artifacts to be available nearby. The connected registry provides a performant on-premises registry solution that regularly synchronizes content with a cloud-based Azure container registry. Scenarios for a connected registry include: * Connected factories * Point-of-sale retail locations * Shipping, oil-drilling, mining, and other occasionally connected environments ## How does the connected registry work? The following picture shows some of the deployment models for the connected registry. ![Connected Registry Overview](media/connected-registry/connected-registry-overview.svg) * **Deployment** - Each connected registry is a resource you manage using a cloud-based Azure container registry. The top parent in the connected registry hierarchy is an Azure container registry in any of the Azure clouds or in a private deployment of [Azure Stack Hub](https://docs.microsoft.com/azure-stack/operator/azure-stack-overview). You use Azure tools to install the connected registry on a server or device on your premises. It can be deployed on any environment that supports container workloads on-premises including [Azure IoT Edge](https://docs.microsoft.com/azure/iot-edge/tutorial-nested-iot-edge), [Azure Stack Hub](https://docs.microsoft.com/azure-stack/operator/azure-stack-overview), and [Azure Arc enabled Kubernetes](https://docs.microsoft.com/azure/azure-arc/kubernetes/overview). * **Content synchronization** - The connected registry regularly accesses the cloud registry to synchronize container images and OCI artifacts. It can also be configured to synchronize just a subset of the repositories from the cloud registry or to synchronize only during certain intervals to reduce traffic between the cloud and the premises.
* **Modes** - The connected registry can work in one of two modes: - The default *registry* mode allows clients to pull as well as push artifacts to the connected registry. Artifacts that are pushed to the connected registry will be synchronized with the cloud registry. The *registry* mode is useful when a local development is in place. The images are pushed to the local connected registry and from there synchronized to the cloud. - When the connected registry is in _mirror_ mode, clients may only pull artifacts. This configuration is used for nested IoT Edge scenarios or when the client has limited capabilities such as an ARM device. The *mirror* mode is useful for all other scenarios where clients need to pull a container image to operate. * **Registry hierarchy** - Each connected registry must be connected to a parent. The top parent is the cloud registry. For hierarchical scenarios, you can nest connected registries in *registry* or *mirror* mode. The parent connected to the cloud registry can operate in both *registry* or *mirror* mode. Children registries must be compatible with their parent capabilities: thus, both *registry* and *mirror* can be children of a connected registry operating in *registry* mode, but only *mirror* can be a child of a connected registry operating in *mirror* mode. ## Client access On-premises clients use standard tools such as the Docker CLI to push or pull content from a connected registry. To manage client access, you create Azure container registry [tokens][repository-scoped-permissions] for access to each connected registry. You can scope the client tokens for pull or push access to one or more repositories in the registry. Each connected registry also needs to regularly communicate with its parent registry. For this purpose, the registry is issued a synchronization token (*sync token*) by the cloud registry. This token is used to authenticate with its parent registry for synchronization and management operations. 
For more information about authentication and authorization for connected registries, see [Manage access to a connected registry][overview-connected-registry-access]. ## Next steps In this overview, you learned about the connected registry and some basic concepts. Continue to the one of the following articles to learn about specific scenarios where connected registry can be utilized. > [Overview: Connected registry access][overview-connected-registry-access] > [Overview: Connected registry and IoT Edge][overview-connected-registry-and-iot-edge] [overview-connected-registry-access]:overview-connected-registry-access.md [overview-connected-registry-and-iot-edge]:overview-connected-registry-and-iot-edge.md [repository-scoped-permissions]: https://docs.microsoft.com/azure/container-registry/container-registry-repository-scoped-permissions ================================================ FILE: docs/preview/connected-registry/overview-connected-registry-access.md ================================================ --- title: Understand access to a connected registry description: Introduction to token-based authentication and authorization for connected registries in Azure Container Registry author: toddysm ms.author: memladen ms.service: container-registry ms.topic: overview ms.date: 01/13/2021 --- # Understand access to a connected registry To access and manage a [connected registry](intro-connected-registry.md), currently only ACR [token-based authentication](container-registry-repository-scoped-permissions.md) is supported. As shown in the following image, two different types of tokens are used by each connected registry: * One or more *client tokens*, which on-premise clients use to authenticate with a connected registry and push or pull images and artifacts to or from it. * A *sync token* used by each connected registry to access its parent and synchronize images and artifacts with it. 
![Connected Registry Authentication Overview](media/connected-registry/connected-registry-authentication-overview.svg) > [!IMPORTANT] > Store token passwords for each connected registry in a safe location. After they are created, token passwords can't be retrieved. You can regenerate token passwords at any time. ## Client tokens To manage client access to a connected registry, you create tokens scoped for actions on one or more repositories. After creating a token, you configure the connected registry to accept the token by using the `az acr connected-registry update` command. A client can then use the token credentials to access a connected registry endpoint - for example, to use Docker CLI commands to pull or push images to the connected registry. Your options for configuring client token actions depend on whether the connected registry allows both push and pull operations or functions as a pull-only mirror. * A connected registry in the default *registry* mode allows both pull and push operations, so you can create a token that allows actions to both *read* and *write* repository content in that registry. * For a connected registry in *mirror* mode, client tokens can only allow actions to *read* repository content. ### Manage client tokens Update client tokens, passwords, or scope maps as needed by using [az acr token](https://docs.microsoft.com/en-us/cli/azure/acr/token?view=azure-cli-latest) and [az acr scope-map](https://docs.microsoft.com/en-us/cli/azure/acr/scope-map?view=azure-cli-latest) commands. Client token updates are propagated automatically to the connected registries that accept the token. ## Sync token Each connected registry uses a *sync token* to authenticate with its parent - which could be another connected registry or the cloud registry. The connected registry automatically uses this token when synchronizing content with the parent or performing other updates. 
* The *sync token* is generated automatically when you create the connected registry resource. You must run the [az acr connected-registry install renew-credentials][az-acr-install-renew-credentials] command to generate the passwords. * Include the *sync token* credentials in the configuration used to deploy the connected registry on premises. * By default, the *sync token* is granted permission to synchronize selected repositories with its parent. You must provide an existing *sync token* or one or more repositories to sync when you create the connected registry resource. * It also has permissions to *read* and *write* synchronization messages on a gateway used to communicate with the connected registry parent. These messages control the synchronization schedule and manage other updates between the connected registry and its parent. ### Manage sync token Update sync tokens, passwords, or scope maps as needed by using [az acr token](https://docs.microsoft.com/en-us/cli/azure/acr/token?view=azure-cli-latest) and [az acr scope-map](https://docs.microsoft.com/en-us/cli/azure/acr/scope-map?view=azure-cli-latest) commands. Sync token updates are propagated automatically to the connected registry. Follow the standard practices of rotating passwords when updating the sync token. > [!WARNING] > Take care not to delete a sync token. Doing so will permanently prevent communication with the corresponding connected registry. You can disable a connected registry by setting the status of the sync token to `disabled`. ## Registry endpoints Token credentials for connected registries are scoped to access specific registry endpoints. A client token accesses the connected registry's endpoint. The connected registry endpoint is the login server URI, which is typically the IP address of the server or device that hosts it. A sync token accesses the endpoint of the parent registry, which is either another connected registry endpoint or the cloud registry itself.
When scoped to access the cloud registry, the sync token needs to reach two registry endpoints: - The fully qualified login server name, for example, `contoso.azurecr.io`. This endpoint is used for authentication. - A fully qualified regional [data endpoint](https://docs.microsoft.com/azure/container-registry/container-registry-firewall-access-rules#enable-dedicated-data-endpoints) for the cloud registry, for example, `contoso.westus2.data.azurecr.io`. This endpoint is used to exchange messages with the connected registry for synchronization purposes. ## Next steps Continue to the one of the following articles to learn about specific scenarios where connected registry can be utilized. > [Overview: Connected registry and IoT Edge][overview-connected-registry-and-iot-edge] [az-acr-install-renew-credentials]: https://docs.microsoft.com/cli/azure/acr/connected-registry/install?view=azure-cli-latest#az_acr_connected_registry_install_renew_credentials [overview-connected-registry-and-iot-edge]:overview-connected-registry-and-iot-edge.md [repository-scoped-permissions]: https://docs.microsoft.com/azure/container-registry/container-registry-repository-scoped-permissions ================================================ FILE: docs/preview/connected-registry/overview-connected-registry-and-iot-edge.md ================================================ --- title: Using connected registry with Azure IoT Edge description: Overview of the connected registry use in hierarchical IoT Edge scenario author: toddysm ms.author: memladen ms.service: container-registry ms.topic: overview ms.date: 01/13/2021 --- # Using connected registry with Azure IoT Edge In this article, you learn how you can use the connected registry feature of Azure container registry (ACR) in hierarchical IoT Edge scenarios. The connected registry can be deployed as an IoT Edge module and play an essential role in serving container images required by the devices in the hierarchy. 
## What is a hierarchical IoT Edge deployment? Azure IoT Edge allows you to deploy IoT Edge devices across networks organized in hierarchical layers. Each layer in a hierarchy is a gateway device that handles messages and requests from devices in the layer beneath it. You can structure a hierarchy of devices so that only the top layer has connectivity to the cloud, and the lower layers can only communicate with adjacent north and south layers. This network layering is the foundation of most industrial networks, which follow the [ISA-95 standard](https://en.wikipedia.org/wiki/ANSI/ISA-95). You can learn how to create a hierarchy of IoT Edge devices in the following tutorial [Tutorial: Create a hierarchy of IoT Edge devices (Preview)][tutorial-nested-iot-edge] ## How to use connected registry in hierarchical IoT Edge scenario? The picture below shows how the connected registry can be used to support the hierarchical deployment of IoT Edge. ![Connected Registry and Hierarchical IoT Edge Deployments](media/connected-registry/connected-registry-iot-edge-overview.svg) In the above architecture, the solid gray lines show the actual network flow while the dashed lines show the logical communication between components and the connected registries. In this example, the top layer of the architecture, *Layer 5: Enterprise Network*, is managed by IT and has access to the Internet. The top layer can access the container registry for Contoso in Azure cloud. The connected registry is deployed as an IoT Edge module on the IoT Edge VM and can directly communicate with the cloud registry to pull and push images and artifacts. The connected registry in the picture is shown as working in *registry* mode. Clients of this connected registry can pull and push images and artifacts to it. Pushed images will be synchronized with the cloud registry. If pushes are not required in that layer, the connected registry can be changed to operate in *mirror* mode.
The lower layer, *Layer 4: Site Business Planning and Logistics*, is configured to communicate only with *Layer 5*. Thus, when deploying the IoT Edge VM on *Layer 4* it needs to pull the module images from the connected registry on *Layer 5* instead. You can also deploy a connected registry working in a *mirror* mode to serve the layers below. This is illustrated with the IoT Edge VM on *Layer 3: Industrial Security Zone*. That VM must pull the module images from the connected registry on *Layer 4*. If clients on lower layers need to be served, a connected registry in *mirror* mode can be deployed on *Layer 3* and so on. In this architecture, the connected registries deployed on each layer are configured to synchronize the images with the connected registry on the layer above. The connected registries are deployed as IoT Edge modules and leverage the IoT Edge mechanisms for deployment and network routing. ## Next steps In this overview, you learned about the use of the connected registry in hierarchical IoT Edge scenarios. Continue to the one of the following articles to learn how to configure and deploy a connected registry to your IoT Edge device. 
> [Quickstart: Create connected registry using the CLI][quickstart-connected-registry-cli] > [Quickstart: Deploy a connected registry to an IoT Edge device][overview-connected-registry-and-iot-edge] > [Quickstart: Deploy connected registry on nested IoT Edge device][quickstart-pull-images-from-connected-registry] [quickstart-connected-registry-cli]:quickstart-connected-registry-cli.md [overview-connected-registry-and-iot-edge]:quickstart-deploy-connected-registry-iot-edge-cli.md [tutorial-nested-iot-edge]: https://docs.microsoft.com/azure/iot-edge/tutorial-nested-iot-edge?view=iotedge-2020-11&tabs=azure-portal [quickstart-connected-registry-nested]: quickstart-deploy-connected-registry-nested-iot-edge-cli.md ================================================ FILE: docs/preview/connected-registry/quickstart-connected-registry-cli.md ================================================ --- title: Quickstart - Create connected registry using the CLI description: Use Azure Container Registry CLI commands to create a connected registry resource. ms.topic: quickstart ms.date: 12/03/2020 ms.author: memladen author: toddysm ms.custom: --- # Quickstart: Create a connected registry using Azure Container Registry CLI commands In this quickstart, you use [Azure Container Registry][container-registry-intro] (ACR) commands to create a connected registry resource in Azure. The connected registry feature of ACR allows you to deploy a registry on your premises and synchronize images between the ACR and your premises. It brings the container images and OCI artifacts closer to your container workloads on premises and increases their acquisition performance. You can review the [ACR connected registry introduction](intro-connected-registry.md) for details about the connected registry feature of Azure Container Registry. 
In this quick start guide, you will create two connected registry resources - one that allows artifact pull and push functionality and one that allows only artifact pull functionality. ## Prerequisites - Use [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart) using the bash environment. [![https://docs.microsoft.com/en-us/azure/includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png](https://docs.microsoft.com/en-us/azure/includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png)](https://shell.azure.com/) - If you prefer, [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI to run CLI reference commands. - If you're using a local install, sign in with Azure CLI by using the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. To finish the authentication process, follow the steps displayed in your terminal. See [Sign in with Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli) for additional sign-in options. - When you're prompted, install Azure CLI extensions on first use. For more information about extensions, see [Use extensions with Azure CLI](https://docs.microsoft.com/en-us/cli/azure/azure-cli-extensions-overview). - Run [az version](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_version) to find the version and dependent libraries that are installed. To upgrade to the latest version, run [az upgrade](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_upgrade). - The Azure CLI commands in this article are formatted for the Bash shell. If you're using a different shell like PowerShell or Command Prompt, you may need to adjust line continuation characters or variable assignment lines accordingly. This article uses variables to minimize the amount of command editing required. 
## Create a resource group If you don't already have a container registry, first create a resource group with the [az group create][az-group-create] command. An Azure resource group is a logical container into which Azure resources are deployed and managed. The following example creates a resource group named *myResourceGroup* in the *eastus* location. ```azurecli az group create --name myResourceGroup --location eastus ``` ## Create a container registry Create a container registry using the [az acr create][az-acr-create] command. The registry name must be unique within Azure, and contain 5-50 alphanumeric characters. In the following example, *mycontainerregistry001* is used. Update this to a unique value. ```azurecli az acr create --resource-group myResourceGroup \ --name mycontainerregistry001 \ --sku Premium ``` This example creates a *Premium* registry. Connected registries are supported only in the *Premium* tier of Azure container registry. For details on available service tiers, see [Container registry service tiers][container-registry-skus]. ## Enable the dedicated data endpoint for the cloud registry For the connected registries to communicate with the cloud registry, the dedicated data endpoint for the Azure Container Registry in the cloud should be enabled by using the [az acr update][az-acr-update] command as follows: ```azurecli az acr update -n mycontainerregistry001 \ --data-endpoint-enabled ``` ## Import images into the container registry This and subsequent quickstart guides use two repositories: - `acr/connected-registry` is the repository that contains the images used to deploy the connected registry on your premises. This repository will also be synchronized to the connected registry in case you want to implement nested registries scenario. - `hello-world` is the repository that will be synchronized to the connected registry and pulled by the connected registry clients. 
The easiest way to populate those repositories is to use the `az acr import` command as follows: ```azurecli az acr import -n mycontainerregistry001 --source mcr.microsoft.com/acr/connected-registry:0.3.0 az acr import -n mycontainerregistry001 --source mcr.microsoft.com/azureiotedge-agent:1.2 az acr import -n mycontainerregistry001 --source mcr.microsoft.com/azureiotedge-hub:1.2 az acr import -n mycontainerregistry001 --source mcr.microsoft.com/azureiotedge-api-proxy:latest az acr import -n mycontainerregistry001 --source mcr.microsoft.com/hello-world:latest ``` ## Create a connected registry resource for pull/push functionality Create a connected registry using the [az acr connected-registry create][az-acr-connected-registry-create] command. Name must start with a letter and contain only alphanumeric characters. It must be 5 to 40 characters long and unique in the hierarchy for this Azure Container Registry. ```azurecli az acr connected-registry create --registry mycontainerregistry001 \ --name myconnectedregistry \ --repository "hello-world" "acr/connected-registry" "azureiotedge-agent" "azureiotedge-hub" "azureiotedge-api-proxy" ``` The above command will create a connected registry resource in Azure and link it to the *mycontainerregistry001* cloud ACR. The *hello-world* and *acr/connected-registry* repositories will be synchronized between the cloud ACR and the registry on premises. Because no `--mode` option is specified for the connected registry, it will allow _pull_ and _push_ functionality by default. Because there is no synchronization schedule defined for this connected registry, both repositories will be synchronized between the cloud registry and the connected registry without interruptions. > [!IMPORTANT] > To support nested scenarios where lower layers have no Internet access, you must always allow synchronization of the `acr/connected-registry` repository. This repository contains the image for the connected registry runtime. 
## Create a connected registry resource for pull-only functionality You can use the connected registry [az acr connected-registry create][az-acr-connected-registry-create] command to create a connected registry with _pull_-only functionality. ```azurecli az acr connected-registry create --registry mycontainerregistry001 \ --parent myconnectedregistry \ --name myconnectedmirror \ --repository "hello-world" "acr/connected-registry" "azureiotedge-agent" "azureiotedge-hub" "azureiotedge-api-proxy" \ --mode mirror ``` The above command will create a connected registry resource in Azure and link it to the *mycontainerregistry001* cloud ACR. The *hello-world* repository will be synchronized between the cloud ACR and the registry on premises. This resource will be enabled for _pull_-only functionality once deployed. Because there is no synchronization schedule defined for this connected registry, both repositories will be synchronized between the cloud registry and the connected registry without interruptions. ## Verify that the resources are created You can use the connected registry [az acr connected-registry list][az-acr-connected-registry-list] command to verify that the resources are created. ```azurecli az acr connected-registry list \ --registry mycontainerregistry001 \ --output table ``` You should see a response as follows: ``` NAME MODE STATUS PARENT LOGIN SERVER LAST SYNC ------------------- -------- -------- -------- -------------- ----------- myconnectedregistry registry myconnectedmirror mirror ``` ## Next steps In this quickstart, you used Azure CLI to create a connected registry resources in Azure. Those new connected registry resources are tied to your Azure Container Registry and allow synchronization of artifact between the cloud registry and the on-premises registry. Continue to the connected registry deployment guides to learn how to deploy the connected registry on your on-premises infrastructure. 
> [Quickstart: Deploy connected registry on IoT Edge][quickstart-deploy-connected-registry-iot-edge-cli] [az-acr-connected-registry-create]: https://docs.microsoft.com/cli/azure/acr/connected-registry?view=azure-cli-latest#az_acr_connected_registry_create [az-acr-connected-registry-list]: https://docs.microsoft.com/cli/azure/acr/connected-registry?view=azure-cli-latest#az_acr_connected_registry_list [az-acr-create]: https://docs.microsoft.com/cli/azure/acr?view=azure-cli-latest#az_acr_create [az-acr-update]: https://docs.microsoft.com/cli/azure/acr?view=azure-cli-latest#az_acr_update [az-group-create]: https://docs.microsoft.com/cli/azure/group?view=azure-cli-latest#az_group_create [container-registry-intro]: https://docs.microsoft.com/azure/container-registry/container-registry-intro [container-registry-skus]: https://docs.microsoft.com/azure/container-registry/container-registry-skus [quickstart-deploy-connected-registry-azure-arc]: quickstart-deploy-connected-registry-azure-arc.md [quickstart-deploy-connected-registry-iot-edge-cli]: quickstart-deploy-connected-registry-iot-edge-cli.md ================================================ FILE: docs/preview/connected-registry/quickstart-deploy-connected-registry-iot-edge-cli.md ================================================ --- title: Quickstart - Deploy a connected registry to an IoT Edge device description: Use Azure Container Registry CLI commands and Azure portal to deploy a connected registry to an Azure IoT Edge device. ms.topic: quickstart ms.date: 12/04/2020 ms.author: memladen author: toddysm ms.custom: --- # Quickstart: Deploy a connected registry to an IoT Edge device In this quickstart, you use [Azure Container Registry][container-registry-intro] commands to deploy a connected registry to an Azure IoT Edge device. You can review the [ACR connected registry introduction](intro-connected-registry.md) for details about the connected registry feature of Azure Container Registry. 
## Prerequisites - Use [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart) using the bash environment. [![https://docs.microsoft.com/en-us/azure/includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png](https://docs.microsoft.com/en-us/azure/includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png)](https://shell.azure.com/) - If you prefer, [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI to run CLI reference commands. - If you're using a local install, sign in with Azure CLI by using the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. To finish the authentication process, follow the steps displayed in your terminal. See [Sign in with Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli) for additional sign-in options. - When you're prompted, install Azure CLI extensions on first use. For more information about extensions, see [Use extensions with Azure CLI](https://docs.microsoft.com/en-us/cli/azure/azure-cli-extensions-overview). - Run [az version](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_version) to find the version and dependent libraries that are installed. To upgrade to the latest version, run [az upgrade](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_upgrade). - The Azure CLI commands in this article are formatted for the Bash shell. If you're using a different shell like PowerShell or Command Prompt, you may need to adjust line continuation characters or variable assignment lines accordingly. This article uses variables to minimize the amount of command editing required. ## Before you begin This tutorial requires an Azure IoT Edge device to be set up upfront. 
You can use the [Deploy your first IoT Edge module to a virtual Linux device](https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/iot-edge/quickstart-linux.md) quickstart guide to learn how to deploy a virtual IoT Edge device. The connected registry is deployed as a module on the IoT Edge device. Also, make sure that you have created the connected registry resource in Azure as described in the [Create connected registry using the CLI][quickstart-connected-registry-cli] quickstart guide. Both `registry` and `mirror` modes will work for this scenario. ## Import the connected registry image to your registry To support nested IoT Edge scenarios, the container image for the connected registry runtime must be available in your private Azure Container Registry. Use the [az acr import][az-acr-import] command to import the connected registry image into your private registry. ```azurecli az acr import \ --name mycontainerregistry001 \ --source mcr.microsoft.com/acr/connected-registry:0.3.0 ``` To learn more about nested IoT Edge scenarios, please visit [Tutorial: Create a hierarchy of IoT Edge devices](https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/iot-edge/tutorial-nested-iot-edge.md). ## Import the IotEdge and API Proxy images into your registry To support the connected registry on nested IoT Edge, you need to import and set up the IoT Edge and API proxy modules using the private images from the acronpremiot registry. Note: You can import those images from MCR if you don't need to create a nested connected registry.
```azurecli az acr import \ --name mycontainerregistry001 \ --source acronpremiot.azurecr.io/acr/microsoft/azureiotedge-agent:20210609.5 -t azureiotedge-agent:20210609.5 az acr import \ --name mycontainerregistry001 \ --source acronpremiot.azurecr.io/acr/microsoft/azureiotedge-hub:20210609.5 -t azureiotedge-hub:20210609.5 az acr import \ --name mycontainerregistry001 \ --source acronpremiot.azurecr.io/acr/microsoft/azureiotedge-api-proxy:9.9.9-dev -t azureiotedge-api-proxy:9.9.9-dev ``` ## Create a client token for access to the cloud registry The IoT Edge runtime will need to authenticate with the cloud registry to pull the images and deploy it. First, use the following command to create a scope map for the iotedge, api proxy and connected registry image repository: ```azurecli az acr scope-map create \ --description "Connected registry repo pull scope map." \ --name conected-registry-pull \ --registry mycontainerregistry001 \ --repository "acr/connected-registry" "azureiotedge-api-proxy" "azureiotedge-agent" "azureiotedge-hub" content/read ``` Next, use the following command to create a client token for the IoT Edge device and associate it to the scope map: ```azurecli az acr token create \ --name crimagepulltoken \ --registry mycontainerregistry001 \ --scope-map conected-registry-pull ``` This command will print a JSON that will include credential information similar to the following: ```json ... "credentials": { "activeDirectoryObject": null, "certificates": [], "passwords": [ { "creationTime": "2020-12-10T00:06:15.356846+00:00", "expiry": null, "name": "password1", "value": "xxxxxxxxxxxxxxxx" }, { "creationTime": "2020-12-10T00:06:15.356846+00:00", "expiry": null, "name": "password2", "value": "xxxxxxxxxxxxxxxx" } ], "username": "crimagepulltoken" } ... ``` You will need the `username` and one of the `passwords` values for the IoT Edge manifest below. > [!IMPORTANT] > Make sure that you save the generated passwords. 
Those are one-time passwords and cannot be retrieved. You can generate new passwords using the [az acr token credential generate][az-acr-token-credential-generate] command. More details about tokens and scope maps are available in [Create a token with repository-scoped permissions](container-registry-repository-scoped-permissions.md). ## Retrieve connected registry configuration information Before deploying the connected registry to the IoT Edge device, you will need to retrieve the configuration from the connected registry resource in Azure. Use the [az acr connected-registry install][az-acr-connected-registry-install] command to retrieve the configuration. ```azurecli az acr connected-registry install renew-credentials \ --registry mycontainerregistry001 \ --name myconnectedregistry \ ``` This will return the connection string for the connected registry including the newly generated passwords. ```json { "ACR_REGISTRY_CONNECTION_STRING": "ConnectedRegistryName=myconnectedregistry;SyncTokenName=myconnectedregistry-sync-token;SyncTokenPassword=xxxxxxxxxxxxxxxx;ParentGatewayEndpoint=mycontainerregistry001.westus2.data.azurecr.io;ParentEndpointProtocol=https", "ACR_REGISTRY_LOGIN_SERVER": "" } ``` The JSON above lists the environment variables that need to be passed to the connected registry container at run time. The following environment variables are optional: - `ACR_REGISTRY_CERTIFICATE_VOLUME` - this is required if your connected registry will be accessible via HTTPS. The volume should point to the location where the HTTPS certificates are stored. If not set, the default location is `/var/acr/certs`. - `ACR_REGISTRY_DATA_VOLUME` - this can optionally be used to overwrite the default location `/var/acr/data` where the images will be stored by the connected registry. This location must match the volume bind for the container. You will need the information for the IoT Edge manifest below. > [!IMPORTANT] > Make sure that you save the generated passwords. 
Those are one-time passwords and cannot be retrieved. If you issue the command again, new passwords will be generated. You can generate new passwords using the [az acr token credential generate][az-acr-token-credential-generate] command. ## Configure a deployment manifest for IoT Edge A deployment manifest is a JSON document that describes which modules to deploy to the IoT Edge device. For more information about how deployment manifests work and how to create them, see [Understand how IoT Edge modules can be used, configured, and reused](https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/iot-edge/module-composition.md). To deploy the connected registry and api proxy module using the Azure CLI, save the following deployment manifest locally as a `.json` file. ```json { "modulesContent": { "$edgeAgent": { "properties.desired": { "modules": { "connected-registry": { "settings": { "image": "mycontainerregistry001.azurecr.io/acr/connected-registry:0.3.0", "createOptions": "{\"HostConfig\":{\"Binds\":[\"/home/azureuser/connected-registry:/var/acr/data\"],\"PortBindings\":{\"8080/tcp\":[{\"HostPort\":\"8080\"}]}}}" }, "type": "docker", "env": { "ACR_REGISTRY_CONNECTION_STRING": { "value": "ConnectedRegistryName=myconnectedregistry;SyncTokenName=myconnectedregistry-sync-token;SyncTokenPassword=xxxxxxxxxxxxxxxx;ParentGatewayEndpoint=mycontainerregistry001.westus2.data.azurecr.io;ParentEndpointProtocol=https" } }, "status": "running", "restartPolicy": "always", "version": "1.0" }, "IoTEdgeAPIProxy": { "settings": { "image": "mycontainerregistry001.azurecr.io/azureiotedge-api-proxy:9.9.9-dev", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"8000/tcp\":[{\"HostPort\":\"8000\"}]}}}" }, "type": "docker", "env": { "NGINX_DEFAULT_PORT": { "value": "8000" }, "CONNECTED_ACR_ROUTE_ADDRESS": { "value": "connected-registry:8080" }, "BLOB_UPLOAD_ROUTE_ADDRESS": { "value": "AzureBlobStorageonIoTEdge:11002" } }, "status": "running", "restartPolicy": "always", 
"version": "1.0" } }, "runtime": { "settings": { "minDockerVersion": "v1.25", "registryCredentials": { "tsmregistry": { "address": "mycontainerregistry001.azurecr.io", "password": "xxxxxxxxxxxxxxxx", "username": "crimagepulltoken" } } }, "type": "docker" }, "schemaVersion": "1.1", "systemModules": { "edgeAgent": { "settings": { "image": "mycontainerregistry001.azurecr.io/azureiotedge-agent:20210609.5", "createOptions": "" }, "type": "docker", "env": { "SendRuntimeQualityTelemetry": { "value": "false" } } }, "edgeHub": { "settings": { "image": "mycontainerregistry001.azurecr.io/azureiotedge-hub:20210609.5", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"443/tcp\":[{\"HostPort\":\"443\"}],\"5671/tcp\":[{\"HostPort\":\"5671\"}],\"8883/tcp\":[{\"HostPort\":\"8883\"}]}}}" }, "type": "docker", "status": "running", "restartPolicy": "always" } } } }, "$edgeHub": { "properties.desired": { "routes": { "route": "FROM /messages/* INTO $upstream" }, "schemaVersion": "1.1", "storeAndForwardConfiguration": { "timeToLiveSecs": 7200 } } } } } ``` > [!IMPORTANT] > If the connected registry listens on a port different from 80 and 443, the `ACR_REGISTRY_LOGIN_SERVER` value must include the port, eg. `192.168.0.100:8080`. Use the information from the previous sections to update the relevant JSON values. You will use the file path in the next section when you run the command to apply the configuration to your device. ## Deploy the connected registry and api proxy modules on IoT Edge Use the following command to deploy the connected registry and api proxy modules on the IoT Edge device: ```azurecli az iot edge set-modules \ --device-id [device id] \ --hub-name [hub name] \ --content [file path] ``` For more details you can refer to the [Deploy Azure IoT Edge modules with Azure CLI](https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/iot-edge/how-to-deploy-modules-cli.md) article. 
To check the status of the connected registry, use the following CLI command: ```azurecli az acr connected-registry show \ --registry mycontainerregistry001 \ --name myconnectedregistry \ --output table ``` You may need to wait a few minutes until the deployment of the connected registry and API proxy completes. Make sure you open the ports `8000`, `5671`, `8883`. The API proxy will listen on port 8000, configured as `NGINX_DEFAULT_PORT`. You can find more information about the API proxy in the [API proxy module repository](https://github.com/Azure/iotedge/tree/master/edge-modules/api-proxy-module). ## Next steps In this quickstart, you learned how to deploy a connected registry to an IoT Edge device. Continue to the next guide to learn how to pull images from the newly deployed connected registry. > [Quickstart: Pull images from a connected registry][quickstart-pull-images-from-connected-registry] > [Quickstart: Deploy connected registry on nested IoT Edge device][quickstart-connected-registry-nested] [az-acr-connected-registry-install]: https://docs.microsoft.com/cli/azure/acr/connected-registry/install?view=azure-cli-latest [az-acr-import]: https://docs.microsoft.com/cli/azure/acr?view=azure-cli-latest#az_acr_import [az-acr-token-credential-generate]: https://docs.microsoft.com/cli/azure/acr/token/credential?view=azure-cli-latest#az_acr_token_credential_generate [container-registry-intro]: container-registry-intro.md [quickstart-pull-images-from-connected-registry]: quickstart-pull-images-from-connected-registry.md [quickstart-connected-registry-cli]: quickstart-connected-registry-cli.md [quickstart-connected-registry-nested]: quickstart-deploy-connected-registry-nested-iot-edge-cli.md ================================================ FILE: docs/preview/connected-registry/quickstart-deploy-connected-registry-kubernetes-v2.md ================================================ --- title: Quickstart - Deploy a connected registry to Kubernetes cluster - V2 description: Use Azure Container Registry CLI 
and Helm 3 commands to deploy a connected registry to a Kubernetes cluster. ms.topic: quickstart ms.date: 09/19/2024 ms.author: yuanxi author: xyxyxyxyxyxy ms.custom: --- # Quickstart: Deploy a connected registry to Kubernetes cluster In this quickstart, you use [Azure Container Registry](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-intro) and [Helm 3](https://helm.sh/docs/intro/quickstart/) commands to deploy a connected registry Helm chart to a Kubernetes cluster. You can review the [ACR connected registry introduction](https://docs.microsoft.com/en-us/azure/container-registry/intro-connected-registry) for details about the connected registry feature of Azure Container Registry. For more details on Helm charts, see [Helm documentation](https://helm.sh/docs/topics/charts/). ## Prerequisites - [Install or upgrade Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli) to version >= 2.64.0 - If you're using a local install, sign in with Azure CLI by using the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. To finish the authentication process, follow the steps displayed in your terminal. See [Sign in with Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli) for additional sign-in options. - The Azure CLI commands in this article are formatted for the Bash shell. If you're using a different shell like PowerShell or Command Prompt, you may need to adjust line continuation characters or variable assignment lines accordingly. This article uses variables to minimize the amount of command editing required. - Both `ReadWrite` and `ReadOnly` modes will work for this scenario. - An up-and-running Kubernetes cluster. If you don't have one, you can create a cluster using one of these options: - [Kubernetes in Docker](https://kind.sigs.k8s.io/) - [K3s: Lightweight Kubernetes](https://rancher.com/docs/k3s/latest/quick-start/) cluster. 
- Self-managed Kubernetes cluster using [Cluster API](https://cluster-api.sigs.k8s.io/user/quick-start.html) - An [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/kubernetes-walkthrough) cluster - [Helm 3](https://helm.sh/docs/intro/install/) installed. **Note: the following tutorial is compatible for Helm releases >= v3.10.0.** - [Kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) installed. - A `kubeconfig` file and context pointing to your cluster. ## Create the Connected registry and synchronize with ACR Creating the Connected registry to synchronize with ACR is the foundation step for deploying the Connected registry Arc extension. Create the Connected registry, which synchronizes with the ACR registry: To create a Connected registry myconnectedregistry that synchronizes with the ACR registry myacrregistry in the resource group myresourcegroup and the repository hello-world, you can run the [az acr connected-registry create](https://review.learn.microsoft.com/en-us/cli/azure/acr/connected-registry?view=azure-cli-latest&branch=main#az-acr-connected-registry-create) command: ```cli az acr connected-registry create --registry myacrregistry \ --name myconnectedregistry \ --resource-group myresourcegroup \ --repository "hello-world" ``` - The [az acr connected-registry create](https://review.learn.microsoft.com/en-us/cli/azure/acr/connected-registry?view=azure-cli-latest&branch=main#az-acr-connected-registry-create) command creates the Connected registry with the specified repository. - The [az acr connected-registry create](https://review.learn.microsoft.com/en-us/cli/azure/acr/connected-registry?view=azure-cli-latest&branch=main#az-acr-connected-registry-create) command overwrites actions if the sync scope map named myconnectedregistry exists and overwrites properties if the sync token named myconnectedregistry exists. 
- The [az acr connected-registry create](https://review.learn.microsoft.com/en-us/cli/azure/acr/connected-registry?view=azure-cli-latest&branch=main#az-acr-connected-registry-create) command validates a dedicated data endpoint during the creation of the Connected registry and provides a command to enable the dedicated data endpoint on the ACR registry. ## Install the Connected Registry Helm Chart From a Kubernetes cluster node, run the following commands to install the connected registry helm chart. 1. Set environment variable to enable OCI artifact support in the Helm 3 client. `export HELM_EXPERIMENTAL_OCI=1` 2. Get the connection string for your connected registry. This command generates `password1` of the corresponding sync token resource. ```cli az acr connected-registry get-settings \ --registry $REGISTRY_NAME \ --name $CONNECTED_REGISTRY_RW \ --generate-password 1 \ --parent-protocol https ``` Command output includes the registry connection string and related settings. The following example output shows the connection string for the connected registry named _myconnectedregistry_ with parent registry _contosoregistry_. ```json { "ACR_REGISTRY_CONNECTION_STRING": "ConnectedRegistryName=myconnectedregistry;SyncTokenName=myconnectedregistry-sync-token;SyncTokenPassword=xxxxxxxxxxxxxxxx;ParentGatewayEndpoint=contosoregistry.eastus.data.azurecr.io;ParentEndpointProtocol=https" } ``` 3. Decide what storage class resource is appropriate for your Kubernetes distribution. The user is required to provide an existing storage class resource name when deploying the connected registry. You can research your distribution to learn more on predefined storage classes or how to create your own. For instance, see predefined storage resources on AKS at [Concepts - Storage in Azure Kubernetes Services (AKS)](https://docs.microsoft.com/en-us/azure/aks/concepts-storage#storage-classes). To view storage class resources on your cluster, run `kubectl get sc` 4. 
Run the following command to install the connected registry helm chart `helm install --namespace connected-registry connected-registry oci://mcr.microsoft.com/acr/connected-registry/chart --create-namespace --set connectionString="" --set service.clusterIP="" --set pvc.storageClassName=` 5. To view the deployed connected registry resources, run `kubectl get services,deployments,pods,secrets -n connected-registry` Note: you will see the connected registry service resource running under the cluster IP you selected. ## Pull from the Connected Registry For more information, reference [Pull images from a connected registry](https://docs.microsoft.com/en-us/azure/container-registry/pull-images-from-connected-registry). 1. Get credentials corresponding to a client token linked to the connected registry. For more information, see [Manage client tokens](https://docs.microsoft.com/en-us/azure/container-registry/overview-connected-registry-access#manage-client-tokens). The following example generates `password1` for token _pulluser_ and registry _contosoregistry_. ``` TOKEN_PWD=$(az acr token credential generate \ --name pulluser --registry contosoregistry --expiration-in-days 30 \ --password1 --query 'passwords[0].value' --output tsv) ``` 2. Create a secret with client token credentials. This client token must be linked to your connected registry. For more information, see [Manage client tokens](https://docs.microsoft.com/en-us/azure/container-registry/overview-connected-registry-access#manage-client-tokens). `kubectl create secret docker-registry regcred --docker-server= --docker-username= --docker-password= --docker-email=` 3. Create a deployment that pulls from the connected registry over HTTP. ``` cat </hello-world:v1 imagePullSecrets: - name: regcred EOF ``` 4. Run `kubectl get pods` and see your hello-world image was pulled from the connected registry. ## Uninstall Connected Registry 1. 
To simulate delete of all resources deployed in the helm release with name "connected-registry", run `helm uninstall connected-registry --dry-run` 2. To delete all resources deployed in the helm release with name "connected-registry", run `helm uninstall connected-registry` 3. Deactivate your connected registry resource before deploying it again. `az acr connected-registry deactivate -r contosoregistry -n myconnectedregistry` ================================================ FILE: docs/preview/connected-registry/quickstart-deploy-connected-registry-kubernetes.md ================================================ --- title: Quickstart - Deploy a connected registry to Kubernetes cluster description: Use Azure Container Registry CLI and Helm 3 commands to deploy a connected registry to a Kubernetes cluster. ms.topic: quickstart ms.date: 02/24/2023 ms.author: jeburke author: jaysterp ms.custom: --- # Quickstart: Deploy a connected registry to Kubernetes cluster In this quickstart, you use [Azure Container Registry](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-intro) and [Helm 3](https://helm.sh/docs/intro/quickstart/) commands to deploy a connected registry Helm chart to a Kubernetes cluster. You can review the [ACR connected registry introduction](https://docs.microsoft.com/en-us/azure/container-registry/intro-connected-registry) for details about the connected registry feature of Azure Container Registry. For more details on Helm charts, see [Helm documentation](https://helm.sh/docs/topics/charts/). ## Supported Kubernetes distributions ## Prerequisites - [Install or upgrade Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli) to version >= 2.30.0 - If you're using a local install, sign in with Azure CLI by using the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. To finish the authentication process, follow the steps displayed in your terminal. 
See [Sign in with Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli) for additional sign-in options. - The Azure CLI commands in this article are formatted for the Bash shell. If you're using a different shell like PowerShell or Command Prompt, you may need to adjust line continuation characters or variable assignment lines accordingly. This article uses variables to minimize the amount of command editing required. - A connected registry resource in Azure as described in the [Create connected registry using the CLI](https://docs.microsoft.com/en-us/azure/container-registry/quickstart-connected-registry-cli) quickstart guide. Both, `ReadWrite` and `ReadOnly` modes will work for this scenario. - An up-and-running Kubernetes cluster. If you don't have one, you can create a cluster using one of these options: - [Kubernetes in Docker](https://kind.sigs.k8s.io/) - [K3s: Lightweight Kubernetes](https://rancher.com/docs/k3s/latest/quick-start/) cluster. - Self-managed Kubernetes cluster using [Cluster API](https://cluster-api.sigs.k8s.io/user/quick-start.html) - An [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/kubernetes-walkthrough) cluster - [Helm 3](https://helm.sh/docs/intro/install/) installed. **Note: the following tutorial is compatible for Helm releases < v3.7.0.** - [Kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) installed. - A `kubeconfig` file and context pointing to your cluster. ## Node Setup Requirements The helm chart installs Kubernetes resources used to run a connected registry on your Kubernetes cluster. The connected registry runs as a singleton pod on one node of the cluster. The user is responsible for configuring each node of the cluster that will pull from this connected registry. To pull from the connected registry over HTTPS, the user is responsible for configuring SSL certs on each node. 
To pull from the connected registry of HTTP, the user must update their container runtime settings to recognize the connected registry as "insecure". See more information below. > [!WARNING] > Pulling from and pushing to the connected registry over HTTP is not secure. It is recommended to setup TLS certificates. ## Fetch the Connected Registry Helm Chart From a cluster node, run the following commands to install the connected registry helm chart. 1. Set environment variable to enable OCI artifact support in the Helm 3 client. `export HELM_EXPERIMENTAL_OCI=1` 2. Pull the connected registry helm chart from MCR `helm chart pull mcr.microsoft.com/acr/connected-registry/chart:0.4.0` 3. Export the helm chart `helm chart export mcr.microsoft.com/acr/connected-registry/chart:0.4.0` 4. View the helm chart `helm show chart connected-registry` 5. Get the connection string for your connected registry. This command generates `password1` of the corresponding sync token resource. ```cli az acr connected-registry get-settings \ --registry $REGISTRY_NAME \ --name $CONNECTED_REGISTRY_RW \ --generate-password 1 \ --parent-protocol https ``` Command output includes the registry connection string and related settings. The following example output shows the connection string for the connected registry named _myconnectedregistry_ with parent registry _contosoregistry_. ```json { "ACR_REGISTRY_CONNECTION_STRING": "ConnectedRegistryName=myconnectedregistry;SyncTokenName=myconnectedregistry-sync-token;SyncTokenPassword=xxxxxxxxxxxxxxxx;ParentGatewayEndpoint=contosoregistry.eastus.data.azurecr.io;ParentEndpointProtocol=https" } ``` ## Helm Chart Components ### Storage Class Decide what storage class resource is appropriate for your Kubernetes distribution. The user is required to provide an existing storage class resource name when deploying the connected registry. You can research your distribution to learn more on predefined storage classes or how to create your own. 
For instance, see predefined storage resources on AKS at [Concepts - Storage in Azure Kubernetes Services (AKS)](https://docs.microsoft.com/en-us/azure/aks/concepts-storage#storage-classes). To view storage class resources on your cluster, run `kubectl get sc` ## HTTPS communication with the connected registry ### PKI Certificate Requirements To establish a secure HTTPS communication with the connected registry, we use PKI certificates during deployment of the connected registry chart. Here are the general requirements for these PKI certs 1. The certificates and keys must be [X.509](https://en.wikipedia.org/wiki/X.509) certificates and [Privacy-Enhanced Mail](https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail) encoded. 2. To configure the connected registry (server) certificate during installation, you must provide * A public certificate * A private key > [!IMPORTANT] > For early proof-of-concept stages, self-signed certificates might be an option but in general, proper PKI certificates signed by a Certificate Authority (CA) should be procured and used. ### Create the PKI certificate 1. Choose a service cluster IP that you will use to deploy the connected registry. The connected registry will be deployed behind a service that is accessible through this IP. Selecting the IP beforehand allows you to create the SSL cert with the IP as the subject alternate name (SAN). Kubernetes uses a set IP range when deploying services. The IP address that you choose must be in the SERVICE_CLUSTER_IP_RANGE CIDR range that is configured for your cluster. You can view the available service cluster ip range in the kube-controller pod by running `kubectl get pod kube-controller-manager- -n kube-system -o jsonpath='{.spec.containers[0].command}'` and viewing the --service-cluster-ip-range setting. 
> [!NOTE] > For AKS clusters, the service cluster IP range can be obtained by using the Azure CLI command > `az aks show -g -n --query "networkProfile.serviceCidr" -o tsv` Confirm that the selected IP is not already in use by running the command `kubectl get svc -A` If the selected service IP is invalid, you will see a `422 Unprocessable Entity` HTTP error response from Kubernetes at deployment time. 2. Create self-signed SSL cert with connected-registry service IP as the SAN a. `mkdir /certs` b. Run `openssl req -newkey rsa:4096 -nodes -sha256 -keyout /certs/mycert.key -x509 -days 365 -out /certs/mycert.crt -addext "subjectAltName = IP:"` > [!IMPORTANT] > For early proof-of-concept stages, self-signed certificates might be an option but in general, proper PKI certificates signed by a Certificate Authority (CA) should be procured and used. 3. Get base64 encoded strings of the certificate and key files `export TLS_CRT=$(cat mycert.crt | base64 -w0)` `export TLS_KEY=$(sudo cat mycert.key | base64 -w0) ` ### Deploy the Connected Registry 1. Deploy the connected registry, provide your connected registry connection string and existing Kubernetes storage class name. The below command deploys the release "connected-registry". Provide the base64-encoded strings of the public certificate and private key in the `tls.crt` and `tls.key` values, respectively. `helm upgrade --namespace connected-registry --create-namespace --install --set connectionString="" --set pvc.storageClassName="" --set service.clusterIP="" --set tls.crt=$TLS_CRT --set tls.key=$TLS_KEY connected-registry ./connected-registry` 2. To view the deployed connected registry resources, run `kubectl get services,deployments,pods,secrets -n connected-registry` Note: you will see the connected registry service resource running under the cluster IP you selected. ### Configure Each Node The following steps assume `containerd` is the container runtime of the Kubernetes cluster. 1. 
Create `/etc/containerd/certs.d/:443` directory on each node or server that will access this connected registry. Service IP is that of the connected-registry service. 2. Copy the CA cert corresponding to the SSL cert to this directory. Note: If using a self-signed cert, copy `mycert.crt` to `ca.crt` and place `ca.crt` in this directory. You should have the following file structure on the node: ``` $ tree /etc/containerd/certs.d /etc/containerd/certs.d └── :443 └── ca.crt ``` 3. Update `/etc/containerd/config.toml` so that containerd can find the directory with the trusted CA cert. Containerd will look in this directory first during runtime operations to the connected registry. Paste the following section into the file. ```toml [plugins."io.containerd.grpc.v1.cri".registry] config_path = "/etc/containerd/certs.d" [plugins."io.containerd.grpc.v1.cri".registry.configs.":443".tls] ca_file = "/etc/containerd/certs.d/:443/ca.crt" ``` 4. Restart your container runtime. For containerd, use the following command `systemctl restart containerd` ### Pull from the Connected Registry For more information, reference [Pull images from a connected registry](https://docs.microsoft.com/en-us/azure/container-registry/pull-images-from-connected-registry). 1. Get credentials corresponding to a client token linked to the connected registry. For more information, see [Manage client tokens](https://docs.microsoft.com/en-us/azure/container-registry/overview-connected-registry-access#manage-client-tokens). The following example generates `password1` for token _pulluser_ and registry _contosoregistry_. ``` TOKEN_PWD=$(az acr token credential generate \ --name pulluser --registry contosoregistry --expiration-in-days 30 \ --password1 --query 'passwords[0].value' --output tsv) ``` 2. Create a secret with client token credentials. This client token must be linked to your connected registry. 
For more information, see [Manage client tokens](https://docs.microsoft.com/en-us/azure/container-registry/overview-connected-registry-access#manage-client-tokens). `kubectl create secret docker-registry regcred --docker-server=:443 --docker-username=jeburkeclient --docker-password= --docker-email=someemail` 3. Create a deployment that pulls from the connected registry over HTTPS. ``` cat <:443/hello-world:v1 imagePullSecrets: - name: regcred EOF ``` ## HTTP (not secure) communication with the connected registry > [!WARNING] > Pulling from and pushing to the connected registry over HTTP is not secure. It is recommended to setup SSL certificates. You should use this option only during early stages of development. ### Deploy the Connected Registry 1. Deploy the connected registry, provide your connected registry connection string and existing Kubernetes storage class name. The below command deploys the release "connected-registry". `helm upgrade --namespace connected-registry --create-namespace --install --set connectionString="" --set httpEnabled=true --set pvc.storageClassName="" connected-registry ./connected-registry` 2. View the deployed connected registry resources `kubectl get services,deployments,pods,secrets -n connected-registry` ### Configure Each Node 1. Get the cluster IP of the deployed connected registry service. This is the endpoint we will use to pull from the connected registry. `export SERVICE_IP=$(kubectl get svc {connected registry name} -n connected-registry -o jsonpath='{.spec.clusterIP}')` 2. Add the connected registry endpoint "$(SERVICE_IP):80" as "insecure" per your container runtime settings on **each node** of your cluster that will access this connected registry. 
For containerd, go to /etc/containerd/config.toml and add the following settings ```toml [plugins."io.containerd.grpc.v1.cri".registry] [plugins."io.containerd.grpc.v1.cri".registry.mirrors] [plugins."io.containerd.grpc.v1.cri".registry.mirrors.":80"] endpoint = ["http://:80"] [plugins."io.containerd.grpc.v1.cri".registry.configs] [plugins."io.containerd.grpc.v1.cri".registry.configs.":80".tls] insecure_skip_verify = true ``` 3. Restart your container runtime. For containerd, use the following command `systemctl restart containerd` ### Pull from the Connected Registry For more information, reference [Pull images from a connected registry](https://docs.microsoft.com/en-us/azure/container-registry/pull-images-from-connected-registry). 1. Get credentials corresponding to a client token linked to the connected registry. For more information, see [Manage client tokens](https://docs.microsoft.com/en-us/azure/container-registry/overview-connected-registry-access#manage-client-tokens). The following example generates `password1` for token _pulluser_ and registry _contosoregistry_. ``` TOKEN_PWD=$(az acr token credential generate \ --name pulluser --registry contosoregistry --expiration-in-days 30 \ --password1 --query 'passwords[0].value' --output tsv) ``` 2. Create a secret with client token credentials. This client token must be linked to your connected registry. For more information, see [Manage client tokens](https://docs.microsoft.com/en-us/azure/container-registry/overview-connected-registry-access#manage-client-tokens). `kubectl create secret docker-registry regcred --docker-server=:80 --docker-username=jeburkeclient --docker-password= --docker-email=someemail` 3. Create a deployment that pulls from the connected registry over HTTP. ``` cat <:80/hello-world:v1 imagePullSecrets: - name: regcred EOF ``` 4. Run `kubectl get pods` and see your hello-world image was pulled from the connected registry. ## Uninstall Connected Registry 1. 
To simulate deletion of all resources deployed in the helm release with name "connected-registry", run `helm uninstall connected-registry --dry-run` 2. To delete all resources deployed in the helm release with name "connected-registry", run `helm uninstall connected-registry` 3. Deactivate your connected registry resource before deploying it again. `az acr connected-registry deactivate -r contosoregistry -n myconnectedregistry` ================================================ FILE: docs/preview/connected-registry/quickstart-deploy-connected-registry-nested-iot-edge-cli.md ================================================ --- title: Quickstart - Deploy a connected registry to a nested IoT Edge device description: Use Azure Container Registry CLI commands and Azure portal to deploy a connected registry to a nested Azure IoT Edge device. ms.topic: quickstart ms.date: 04/28/2021 ms.author: memladen author: toddysm ms.custom: --- # Quickstart: Deploy a connected registry to a nested IoT Edge device In this quickstart, you use [Azure Container Registry][container-registry-intro] commands to deploy a connected registry to a nested Azure IoT Edge device. You can review the [ACR connected registry introduction](intro-connected-registry.md) for details about the connected registry feature of Azure Container Registry. [!INCLUDE [quickstarts](https://docs.microsoft.com/en-us/azure/iot-edge/quickstart-linux?view=iotedge-2018-06)] [!INCLUDE [azure-cli-prepare-your-environment.md](https://github.com/MicrosoftDocs/azure-docs/blob/master/includes/azure-cli-prepare-your-environment.md)] ## Before you begin This tutorial also requires that you know how to set up a connected registry on an IoT Edge device by following the [Quickstart: Deploy a connected registry to an IoT Edge device](quickstart-deploy-connected-registry-iot-edge-cli.md). 
Also, make sure that you have created the connected registry resource in Azure as described in the [Create connected registry using the CLI][quickstart-connected-registry-cli] quickstart guide. Only `mirror` mode will work for this scenario. ## Retrieve connected registry configuration information Before deploying the connected registry to the nested IoT Edge device, you will need to retrieve the configuration from the connected registry resource in Azure. Use the [az acr connected-registry install][az-acr-connected-registry-install] command to retrieve the configuration. ```azurecli az acr connected-registry install renew-credentials \ --registry mycontainerregistry001 \ --name myconnectedmirror ``` This will return the connection string for the connected registry including the newly generated passwords. ```json { "ACR_REGISTRY_CONNECTION_STRING": "ConnectedRegistryName=myconnectedmirror;SyncTokenName=myconnectedmirror-sync-token;SyncTokenPassword=xxxxxxxxxxxxxxxx;ParentGatewayEndpoint=;ParentEndpointProtocol=", "ACR_REGISTRY_LOGIN_SERVER": "" } ``` The JSON above lists the environment variables that need to be passed to the connected registry container at run time. The following environment variables are optional: - `ACR_REGISTRY_LOGIN_SERVER` - this is the hostname or FQDN of the IoT Edge device that hosts the connected registry. You will need the information for the IoT Edge manifest below. > [!IMPORTANT] > Make sure that you save the generated connection string. The connection string contains one-time passwords that cannot be retrieved. If you issue the command again, new passwords will be generated. You can generate new passwords using the [az acr token credential generate][az-acr-token-credential-generate] command. ## Configure a deployment manifest for the nested IoT Edge A deployment manifest is a JSON document that describes which modules to deploy to the IoT Edge device.
For more information about how deployment manifests work and how to create them, see [Understand how IoT Edge modules can be used, configured, and reused](https://docs.microsoft.com/en-us/azure/iot-edge/module-composition?view=iotedge-2020-11#:~:text=The%20IoT%20Edge%20agent%20module,should%20be%20created%20and%20managed.). To deploy the connected registry module using the Azure CLI, save the following deployment manifest locally as a `.json` file. [!IMPORTANT] In the following deployment manifest, $upstream will be used as the IP or FQDN of the device hosting the parent connected registry. However, $upstream is not supported in environment variables. The connected registry needs to read the environment variable ACR_PARENT_GATEWAY_ENDPOINT to get the parent gateway endpoint. Instead of using $upstream, the connected registry supports dynamically resolving the IP or FQDN from another environment variable. On the nested IoT Edge device, there's an environment variable $IOTEDGE_PARENTHOSTNAME on the lower level that is equal to the IP or FQDN of the parent device. We can pass this environment variable as the value of ACR_PARENT_GATEWAY_ENDPOINT to avoid hardcoding the parent IP or FQDN.
```json { "modulesContent": { "$edgeAgent": { "properties.desired": { "modules": { "connected-registry": { "settings": { "image": "$upstream/acr/connected-registry:0.3.0", "createOptions": "{\"HostConfig\":{\"Binds\":[\"/home/azureuser/connected-registry:/var/acr/data\",\"/usr/local/share/ca-certificates:/usr/local/share/ca-certificates\",\"/etc/ssl/certs:/etc/ssl/certs\",\"LogConfig\":{ \"Type\": \"json-file\", \"Config\": {\"max-size\": \"10m\",\"max-file\": \"3\"}}]}}" }, "type": "docker", "env": { "ACR_REGISTRY_CONNECTION_STRING": { "value": "ConnectedRegistryName=myconnectedmirror;SyncTokenName=myconnectedmirror-sync-token;SyncTokenPassword=xxxxxxxxxxxxxxxx;ParentGatewayEndpoint=$IOTEDGE_PARENTHOSTNAME;ParentEndpointProtocol=https" } }, "status": "running", "restartPolicy": "always", "version": "1.0" }, "IoTEdgeApiProxy": { "settings": { "image": "$upstream/azureiotedge-api-proxy:9.9.9-dev", "createOptions": "{\"HostConfig\": {\"PortBindings\": {\"443/tcp\": [{\"HostPort\": \"443\"}]}}}" }, "type": "docker", "version": "1.0", "env": { "NGINX_DEFAULT_PORT": { "value": "443" }, "CONNECTED_ACR_ROUTE_ADDRESS": { "value": "connectedRegistry:8080" }, "NGINX_CONFIG_ENV_VAR_LIST": { "value": "NGINX_DEFAULT_PORT,BLOB_UPLOAD_ROUTE_ADDRESS,CONNECTED_ACR_ROUTE_ADDRESS,IOTEDGE_PARENTHOSTNAME,DOCKER_REQUEST_ROUTE_ADDRESS" }, "BLOB_UPLOAD_ROUTE_ADDRESS": { "value": "AzureBlobStorageonIoTEdge:11002" } }, "status": "running", "restartPolicy": "always", "startupOrder": 3 } }, "runtime": { "settings": { "minDockerVersion": "v1.25", "registryCredentials": { "tsmregistry": { "address": "$upstream", "password": "xxxxxxxxxxxxxxxx", "username": "myconnectedmirror-sync-token" } } }, "type": "docker" }, "schemaVersion": "1.1", "systemModules": { "edgeAgent": { "settings": { "image": "$upstream/azureiotedge-agent:20210609.5", "createOptions": "" }, "type": "docker", "env": { "SendRuntimeQualityTelemetry": { "value": "false" } } }, "edgeHub": { "settings": { "image": 
"$upstream/azureiotedge-hub:20210609.5", "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"443/tcp\":[{\"HostPort\":\"443\"}],\"5671/tcp\":[{\"HostPort\":\"5671\"}],\"8883/tcp\":[{\"HostPort\":\"8883\"}]}}}" }, "type": "docker", "status": "running", "restartPolicy": "always" } } } }, "$edgeHub": { "properties.desired": { "routes": { "route": "FROM /messages/* INTO $upstream" }, "schemaVersion": "1.1", "storeAndForwardConfiguration": { "timeToLiveSecs": 7200 } } } } } ``` Use the information from the previous sections to update the relevant JSON values: - The environment variable `ACR_REGISTRY_CONNECTION_STRING` needs to be updated with the output from the `az acr connected-registry install renew-credentials` command above. You will need to manually replace the `ParentGatewayEndpoint` with the $IOTEDGE_PARENTHOSTNAME. You will also need to select the proper protocol in `ParentEndpointProtocol`. - For each module in the manifest, you should update the registry endpoint to the $upstream. You will use the file path in the next section when you run the command to apply the configuration to your device. ## Setup and deploy the connected registry module on nested IoT Edge This tutorial requires a nested Azure IoT Edge device to be set up upfront. You can use the [Deploy your first IoT Edge module to a virtual Linux device](https://docs.microsoft.com/en-us/azure/iot-edge/quickstart-linux?view=iotedge-2020-11) quickstart guide to learn how to deploy a virtual IoT Edge device. To create a nested IoT Edge devices, follow the instructions [Tutorial: Create a hierarchy of IoT Edge devices](https://docs.microsoft.com/en-us/azure/iot-edge/tutorial-nested-iot-edge?view=iotedge-2020-11) to learn how to configure hierarchical IoT edge devices. The connected registry is deployed as a module on the nested IoT Edge device. Based on the tutorial, it overall includes the following steps: 1. Create top level and lower level vms from existing template. 
The template will also install the IoT agent on it. You can use the [Tutorial: Install or uninstall Azure IoT Edge for Linux](https://docs.microsoft.com/en-us/azure/iot-edge/how-to-install-iot-edge?view=iotedge-2020-11) to learn how to manually set up the machine if you need to deploy from your own devices. 2. Use the iotedge-config tool to create and configure your hierarchy, following the steps below in the Azure CLI: ```bash mkdir nestedIotEdgeTutorial cd ~/nestedIotEdgeTutorial wget -O iotedge_config.tar "https://github.com/Azure-Samples/iotedge_config_cli/releases/download/latest/iotedge_config_cli.tar.gz" tar -xvf iotedge_config.tar ``` This will create the iotedge_config_cli_release folder in your tutorial directory. The template file used to create your device hierarchy is the iotedge_config.yaml file found in ~/nestedIotEdgeTutorial/iotedge_config_cli_release/templates/tutorial. In the same directory, there are two deployment manifests for the top and lower levels: the deploymentTopLayer.json and deploymentLowerLayer.json files. Refer to step 4 below on how to prepare them. 3. Edit iotedge_config.yaml with your information. This includes the iothub_hostname, iot_name, the deployment template files for both the top layer and the child, as well as the credentials used to pull the image from upstream. Please refer to [Quickstart: Deploy a connected registry to an IoT Edge device](quickstart-deploy-connected-registry-iot-edge-cli.md) if you are not familiar with how to create a client token. Also make sure the client token has the permissions to pull all the required images. Below is a sample config. ```yaml config_version: "1.0" iothub: iothub_hostname: myiothub.azure-devices.net iothub_name: myiothub ## Authentication method used by IoT Edge devices: symmetric_key or x509_certificate authentication_method: symmetric_key ## Root certificate used to generate device CA certificates. Optional.
If not provided a self-signed CA will be generated # certificates: # root_ca_cert_path: "" # root_ca_cert_key_path: "" ## IoT Edge configuration template to use configuration: template_config_path: "./templates/tutorial/device_config.toml" default_edge_agent: "$upstream:8000/azureiotedge-agent:20210609.5" ## Hierarchy of IoT Edge devices to create edgedevices: device_id: top-layerx edge_agent: "mycontainerregistry001.azurecr.io/azureiotedge-agent:20210609.5" ## Optional. If not provided, default_edge_agent will be used deployment: "./templates/tutorial/deploymentTopLayer.json" ## Optional. If provided, the given deployment file will be applied to the newly created device # hostname: "FQDN or IP" ## Optional. If provided, install.sh will not prompt user for this value nor the parent_hostname value container_auth: // The token used to pull the image from cloud registry serveraddress: "mycontainerregistry001.azurecr.io" username: "crimagepulltokentop" password: "xxxxxxxxxxxxxxxx" child: - device_id: lower-layerx deployment: "./templates/tutorial/deploymentLowerLayer.json" ## Optional. If provided, the given deployment file will be applied to the newly created device # hostname: "FQDN or IP" ## Optional. If provided, install.sh will not prompt user for this value nor the parent_hostname value container_auth: //The token used to pull the image from parent connected registry serveraddress: "$upstream:8000" username: "crimagepulltokenlower" password: "xxxxxxxxxxxxxxxx" ``` 4. Prepare the top level and lower level deployment files (deploymentTopLayer.json and deploymentLowerLayer.json). The top level deployment file is the same as the one you used to set up a connected registry on a IoT Edge device. Refer [Quickstart: Deploy a connected registry to an IoT Edge device](quickstart-deploy-connected-registry-iot-edge-cli.md). Make sure you do have API proxy module deployed on top layer and open the port `8000`, `5671`, `8883`. 
For the lower level deployment file, please refer to the above section 'Configure a deployment manifest for the nested IoT Edge' about the differences from the top level deployment file. Overall, the lower level deployment file is similar to the top level deployment file. The differences are: - It needs to pull all the required images from the top level connected registry instead of from the cloud registry or MCR. When you set up the top level connected registry, you need to make sure it will sync up all the IoT agent, hub and connected registry images locally (azureiotedge-agent, azureiotedge-hub, connected-registry). The lower level IoT device needs to pull these images from the top level connected registry. - It needs to configure the parent gateway endpoint with the top level connected registry IP or FQDN instead of the cloud registry. 5. Install on the top and lower level devices. Navigate to your iotedge_config_cli_release directory and run the tool to create your hierarchy of IoT Edge devices. ```bash cd ~/nestedIotEdgeTutorial/iotedge_config_cli_release ./iotedge_config --config ~/nestedIotEdgeTutorial/iotedge_config_cli_release/templates/tutorial/iotedge_config.yaml --output ~/nestedIotEdgeTutorial/iotedge_config_cli_release/outputs -f ``` With the --output flag, the tool creates the device certificates, certificate bundles, and a log file in a directory of your choice. With the -f flag set, the tool will automatically look for existing IoT Edge devices in your IoT Hub and remove them, to avoid errors and keep your hub clean. Copy the top-layer.zip and lower-layer.zip generated in the above steps to the corresponding top and lower VMs using scp. ```bash scp @:~ ``` Go to each device and unzip the configuration bundle. You'll need to install zip first. ```bash sudo apt install zip unzip ~//.zip (unzip top-layer.zip) ``` Unzip the installation files and run ./install.sh, inputting the IP and host name. That completes the top layer device deployment.
Run iotedge list to double check that all modules are running well. Repeat the same steps on the lower level device. Unzip the files and run ./install.sh. Input the IP, host name and parent hostname. That completes the lower layer deployment. Run iotedge list to double check that all modules are running well. If there are any problems, e.g. an invalid deployment manifest, you need to manually redeploy the modules. Refer to the next section on how to make a deployment manually on the top or lower device. ## Manually Deploy the connected registry module on IoT Edge The following step might be covered during the nested IoT setup after you run install.sh on the top and lower level devices. However, it is also possible that the previous deployment didn't succeed, and you can use the following way to redeploy it. Use the following command to deploy the connected registry module on the IoT Edge device: ```azurecli az iot edge set-modules \ --device-id [device id] \ --hub-name [hub name] \ --content [file path] ``` For more details you can refer to the [Deploy Azure IoT Edge modules with Azure CLI](https://docs.microsoft.com/en-us/azure/iot-edge/how-to-deploy-modules-cli?view=iotedge-2020-11) article. To check the status of the connected registry, use the following CLI command: ```azurecli az acr connected-registry show \ --registry mycontainerregistry001 \ --name myconnectedmirror \ --output table ``` You may need to wait a few minutes until the deployment of the connected registry completes. A successful response from the command will include the following: ```azurecli connectionState: Online ``` ## Next steps In this quickstart, you learned how to deploy a connected registry to an IoT Edge device. Continue to the next guide to learn how to pull images from the newly deployed connected registry.
> [Quickstart: Pull images from a connected registry][quickstart-pull-images-from-connected-registry] [az-acr-connected-registry-install]: /cli/azure/acr#az-acr-connected-registry-install [az-acr-import]: /cli/azure/acr#az-acr-import [az-acr-token-credential-generate]: /cli/azure/acr/credential#az-acr-token-credential-generate [container-registry-intro]: container-registry-intro.md [quickstart-pull-images-from-connected-registry]: quickstart-pull-images-from-connected-registry.md [quickstart-connected-registry-cli]: quickstart-connected-registry-cli.md ================================================ FILE: docs/preview/connected-registry/quickstart-pull-images-from-connected-registry.md ================================================ --- title: Quickstart - Pull images from a connected registry description: Use Azure Container Registry CLI commands to configure a client token and pull images from a connected registry. ms.topic: quickstart ms.date: 12/04/2020 ms.author: memladen author: toddysm ms.custom: --- # Quickstart: Pull images from a connected registry In this quickstart, you use [Azure Container Registry][container-registry-intro] commands to configure a client token for a connected registry and use this client token to pull images. You can review the [ACR connected registry introduction](intro-connected-registry.md) for details about the connected registry feature of Azure Container Registry. ## Prerequisites - Use [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart) using the bash environment. [![https://docs.microsoft.com/en-us/azure/includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png](https://docs.microsoft.com/en-us/azure/includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png)](https://shell.azure.com/) - If you prefer, [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI to run CLI reference commands. 
- If you're using a local install, sign in with Azure CLI by using the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. To finish the authentication process, follow the steps displayed in your terminal. See [Sign in with Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli) for additional sign-in options. - When you're prompted, install Azure CLI extensions on first use. For more information about extensions, see [Use extensions with Azure CLI](https://docs.microsoft.com/en-us/cli/azure/azure-cli-extensions-overview). - Run [az version](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_version) to find the version and dependent libraries that are installed. To upgrade to the latest version, run [az upgrade](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_upgrade). - The Azure CLI commands in this article are formatted for the Bash shell. If you're using a different shell like PowerShell or Command Prompt, you may need to adjust line continuation characters or variable assignment lines accordingly. This article uses variables to minimize the amount of command editing required. ## Before you begin Make sure that you have created the connected registry resource in Azure as described in the [Create connected registry using the CLI][quickstart-connected-registry-cli] quickstart guide and have a connected registry deployed on your premises as described in [Quickstart: Deploy a connected registry to an IoT Edge device](quickstart-deploy-connected-registry-iot-edge-cli.md) or [Quickstart: Deploy a connected registry to an Azure Arc cluster](quickstart-deploy-connected-registry-azure-arc.md). 
## Create a scope map Use the following CLI command to create a scope map for read access to the `hello-world` repository: ```azurecli az acr scope-map create \ --name hello-world-scopemap \ --registry mycontainerregistry001 \ --repository hello-world content/read \ --description "Scope map for the connected registry." ``` ## Create a client token Use the following CLI command to create a client token and associate it with the newly created scope map: ```azurecli az acr token create \ --name myconnectedregistry-client-token \ --registry mycontainerregistry001 \ --scope-map hello-world-scopemap ``` The command will return details about the newly generated token including passwords. > [!IMPORTANT] > Make sure that you save the generated passwords. Those are one-time passwords and cannot be retrieved. You can generate new passwords using the [az acr token credential generate][az-acr-token-credential-generate] command. ## Update the connected registry with the client token Use the following CLI command to update the connected registry with the newly created client token: ```azurecli az acr connected-registry update \ --name myconnectedregistry \ --registry mycontainerregistry001 \ --add-client-token myconnectedregistry-client-token ``` ## Pull an image from the connected registry From a machine with access to the connected registry instance, use the following command to sign into the connected registry: ``` docker login -u myconnectedregistry-client-token -p ``` Use the following command to pull the `hello-world` image: ``` docker pull /hello-world ``` ## Next steps In this quickstart, you learned how to configure a client token for the connected registry and pull a container image. 
[az-acr-token-credential-generate]: https://docs.microsoft.com/cli/azure/acr/token/credential?view=azure-cli-latest#az_acr_token_credential_generate [container-registry-intro]: container-registry-intro.md [quickstart-connected-registry-cli]: quickstart-connected-registry-cli.md ================================================ FILE: docs/preview/connected-registry/quickstart-send-connected-registry-events-to-event-grid.md ================================================ --- title: Quickstart - Send connected registry events to Azure Event Grid description: Send connected registry events to Azure Event Grid. ms.topic: quickstart ms.date: 03/21/2022 ms.author: savaradh author: savaradh ms.custom: --- # Quickstart: Send connected registry artifact events to Event Grid In this quickstart, you will learn how to configure Connected registry [Azure Container Registry][container-registry-intro] to send artifact push and delete events to [Azure Event Grid][event-grid-overview] for processing and notification. You can review the [ACR connected registry introduction](intro-connected-registry.md) for details about the connected registry feature of Azure Container Registry. ## Scenario overview As described in the [introduction][container-registry-intro], the connected registry synchronizes artifacts from the parent and also allows local push and delete of the artifacts. We can configure the connected registry to notify via [Azure Event Grid][event-grid-overview] whenever there is an update to the artifact. This will allow us to perform other actions based on the availability of the artifact on the connected registry. ## Prerequisites - Use [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart) using the bash environment. 
[![https://docs.microsoft.com/en-us/azure/includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png](https://docs.microsoft.com/en-us/azure/includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png)](https://shell.azure.com/) - If you prefer, [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI to run CLI reference commands. - If you're using a local install, sign in with Azure CLI by using the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. To finish the authentication process, follow the steps displayed in your terminal. See [Sign in with Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli) for additional sign-in options. - When you're prompted, install Azure CLI extensions on first use. For more information about extensions, see [Use extensions with Azure CLI](https://docs.microsoft.com/en-us/cli/azure/azure-cli-extensions-overview). - Run [az version](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_version) to find the version and dependent libraries that are installed. To upgrade to the latest version, run [az upgrade](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_upgrade). - The Azure CLI commands in this article are formatted for the Bash shell. If you're using a different shell like PowerShell or Command Prompt, you may need to adjust line continuation characters or variable assignment lines accordingly. This article uses variables to minimize the amount of command editing required. 
## Before you begin Make sure that you have created the connected registry resource in Azure as described in the [Create connected registry using the CLI](quickstart-connected-registry-cli.md) quickstart guide and have a connected registry deployed on your premises as described in [Quickstart: Deploy a connected registry to an IoT Edge device](quickstart-deploy-connected-registry-iot-edge-cli.md) or [Quickstart: Deploy a connected registry to Kubernetes cluster](quickstart-deploy-connected-registry-kubernetes.md). The connected registry image that is deployed should be of version at least 0.6.0. Also, make sure you understand [Azure Event Grid](https://docs.microsoft.com/en-us/azure/event-grid/) and have registered to use it. You should also have an event endpoint ready where you would like to send your events. You can check the quickstart to configure Azure Event Grid for Azure Container Registry events as shown in [QuickStart: Container registry events on Event Grid][quickstart-eventgrid-container-registry]. Before proceeding, set the event endpoint and other Azure Container Registry information. ``` APP_ENDPOINT= ACR_NAME=mycontainerregistry001 ACR_CONNECTED_REGISTRY_NAME=myconnectedregistry ACR_REGISTRY_ID=$(az acr show --name $ACR_NAME --query id --output tsv) ``` In this tutorial, we configure the connected registry to send artifact events like push and delete to Azure Event Grid in a two-step process. 1. Configure the connected registry to generate events on patterns of interest. 2. Configure Azure Event Grid to subscribe and filter for events generated by the connected registry.
## Notification Events Generation on a connected registry Use the following CLI command to configure the connected registry to generate notifications for push and delete events on all artifacts ```azurecli az acr connected-registry update -r $ACR_NAME --name $ACR_CONNECTED_REGISTRY_NAME \ --add-notifications *:* ``` If you are interested in generating events only for specific artifact patterns or specific actions, you can specify the patterns in the form shown below ```azurecli az acr connected-registry update -r $ACR_NAME \ --name $ACR_CONNECTED_REGISTRY_NAME \ --add-notifications hello-world:* hello-world:1.0:delete \ hello-world@sha256:92c7f9c92844bbbb5d0a101b22f7c2a7949e40f8ea90c8b3bc396879d95e899a:push ``` The pattern is of the format ``` artifact ":" action ``` where `artifact` and `action` are further defined as ```yaml artifact: name [ ":" tag ] [ "@" digest ] action: "push" or "delete" ``` The wildcard `*` should be used to cover all values on a section. For example, 1. \*:\* - Notify all actions on all tags and digests on all artifacts on the connected registry 2. \*:\*:\*- Notify all actions on all tags on all artifacts on the connected registry 3. \*@\*:\*- Notify all actions on all digests on all artifacts on the connected registry 4. path/to/repo:\* - Notify all actions on all artifacts on repository '/path/to/repo' on the connected registry 5. path/to/repo:myTag:\* - Notify all actions on tag 'myTag' on repository '/path/to/repo' on the connected registry 6. path/to/repo@myDigest:delete - Notify only 'delete' action on digest 'myDigest' on repository '/path/to/repo' on the connected registry 7. path/to/repo/\*:push - Notify only 'push' action on all artifacts on all repositories that match '/path/to/repo/\*' on the connected registry ## Event Grid Subscription and filtering Connected Registry Events on Event Grid In this step, you will configure eventgrid to subscribe for events from connected registry. 
You can use the `--advanced-filter` on Event Grid to filter out connected registry-specific events. The connected registry events will align to the existing Azure Container Registry event types `Microsoft.ContainerRegistry.ImagePushed` and `Microsoft.ContainerRegistry.ImageDeleted`. 1. To filter events from all connected registries ```azurecli az eventgrid event-subscription create \ --name event-sub-acr \ --source-resource-id $ACR_REGISTRY_ID \ --endpoint $APP_ENDPOINT \ --advanced-filter data.connectedregistry IsNotNull ``` 2. To filter connected registry events based on the connected registry name ```azurecli az eventgrid event-subscription create \ --name event-sub-acr \ --source-resource-id $ACR_REGISTRY_ID \ --endpoint $APP_ENDPOINT \ --advanced-filter data.connectedregistry IsNotNull \ --advanced-filter data.connectedregistry.name StringIn $ACR_CONNECTED_REGISTRY_NAME ``` Whenever there is a push or delete on any artifact on the connected registry, you will see the events as shown below. The events might take multiples of the sync interval time to appear on the endpoint, depending on the depth of the connected registry hierarchy.
- `Microsoft.ContainerRegistry.ImagePushed` event ```yaml [{ "id": "831e1650-001e-001b-66ab-eeb76e069631", "topic": "/subscriptions//resourceGroups//providers/Microsoft.ContainerRegistry/registries//connectedRegistries/", "subject": "aci-helloworld:v1", "eventType": "Microsoft.ContainerRegistry.ImagePushed", "eventTime": "2018-04-25T21:39:47.6549614Z", "data": { "id": "31c51664-e5bd-416a-a5df-e5206bc47ed0", "timestamp": "2018-04-25T21:39:47.276585742Z", "action": "push", "target": { "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "size": 3023, "digest": "sha256:213bbc182920ab41e18edc2001e06abcca6735d87782d9cef68abd83941cf0e5", "repository": "aci-helloworld", "tag": "v1", "length": 3023, }, "connectedregistry": { "name": "" }, }, "dataVersion": "2.0", "metadataVersion": "1" }] ``` - `Microsoft.ContainerRegistry.ImageDeleted` event ```yaml [{ "id": "831e1650-001e-001b-66ab-eeb76e069631", "topic": "/subscriptions//resourceGroups//providers/Microsoft.ContainerRegistry/registries//connectedRegistries/", "subject": "aci-helloworld", "eventType": "Microsoft.ContainerRegistry.ImageDeleted", "eventTime": "2018-04-25T21:39:47.6549614Z", "data": { "id": "31c51664-e5bd-416a-a5df-e5206bc47ed0", "timestamp": "2018-04-25T21:39:47.276585742Z", "action": "delete", "target": { "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "digest": "sha256:213bbc182920ab41e18edc2001e06abcca6735d87782d9cef68abd83941cf0e5", "repository": "aci-helloworld", }, "connectedregistry": { "name": "" }, }, "dataVersion": "2.0", "metadataVersion": "1" }] ``` Please note that, if you already have event subscriptions on event grid for the concerned Azure Container Registry and you later configure generation of events on connected registry using CLI, you will immediately start to see the new connected registry events on those existing event grid subscriptions. 
[container-registry-intro]: https://docs.microsoft.com/azure/container-registry/ [event-grid-overview]: (https://docs.microsoft.com/en-us/azure/event-grid/overview) [quickstart-eventgrid-container-registry]: https://docs.microsoft.com/en-us/azure/container-registry/container-registry-event-grid-quickstart?toc=/azure/event-grid/toc.json ================================================ FILE: docs/preview/connected-registry/quickstart-view-connected-registry-repos-and-tags.md ================================================ --- title: Quickstart - View connected registry repositories and tags description: Use curl commands to view the repositories and tags stored in a deployed connected registry. ms.topic: quickstart ms.date: 01/06/2022 ms.author: jeburke author: jaysterp ms.custom: --- # Quickstart: View repositories and tags in a deployed connected registry In this quickstart, you use [Azure Container Registry][container-registry-intro] and [curl](https://curl.se/) commands to view available repositories and tags in a deployed connected registry. You can review the [ACR connected registry introduction](intro-connected-registry.md) for details about the connected registry feature of Azure Container Registry. ## Prerequisites - Use [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart) using the bash environment. [![https://docs.microsoft.com/en-us/azure/includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png](https://docs.microsoft.com/en-us/azure/includes/media/cloud-shell-try-it/hdi-launch-cloud-shell.png)](https://shell.azure.com/) - If you prefer, [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI to run CLI reference commands. - If you're using a local install, sign in with Azure CLI by using the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. To finish the authentication process, follow the steps displayed in your terminal. 
See [Sign in with Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli) for additional sign-in options. - When you're prompted, install Azure CLI extensions on first use. For more information about extensions, see [Use extensions with Azure CLI](https://docs.microsoft.com/en-us/cli/azure/azure-cli-extensions-overview). - Run [az version](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_version) to find the version and dependent libraries that are installed. To upgrade to the latest version, run [az upgrade](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_upgrade). - The Azure CLI commands in this article are formatted for the Bash shell. If you're using a different shell like PowerShell or Command Prompt, you may need to adjust line continuation characters or variable assignment lines accordingly. This article uses variables to minimize the amount of command editing required. ## Before you begin Make sure that you have created the connected registry resource in Azure as described in the [Create connected registry using the CLI][quickstart-connected-registry-cli] quickstart guide and have a connected registry deployed on your premises as described in [Quickstart: Deploy a connected registry to an IoT Edge device](quickstart-deploy-connected-registry-iot-edge-cli.md) or [Quickstart: Deploy a connected registry to Kubernetes cluster](quickstart-deploy-connected-registry-kubernetes.md). In this tutorial, we configure a _client token_ to list available repositories and tags. Reference [Understand access to a connected registry](overview-connected-registry-access.md) for more information on how client tokens are used for authentication with a connected registry. ## Create a client token Use the following CLI command to create a token with read access to your synced repositories. We need both `content/read` and `metadata/read` permissions to list the repositories and tags in the connected registry. 
The below command creates a token with read access to the "hello-world" and "testing" repositories. ```azurecli az acr token create \ --name myconnectedregistry-client-token \ --registry mycontainerregistry001 \ --repository hello-world content/read metadata/read \ --repository testing content/read metadata/read ``` The command will return details about the newly generated token including passwords. > [!IMPORTANT] > Make sure that you save the generated passwords. Those are one-time passwords and cannot be retrieved. You can generate new passwords using the [az acr token credential generate][az-acr-token-credential-generate] command. ## Update the connected registry with the client token Use the following CLI command to update the connected registry with the newly created client token: ```azurecli az acr connected-registry update \ --name myconnectedregistry \ --registry mycontainerregistry001 \ --add-client-token myconnectedregistry-client-token ``` ## Configure your machine From a machine with access to the deployed connected registry, install the following tools: 1. Install [curl](https://curl.se/), a command-line data transfer tool. `curl` is used to issue requests to the connected registry server. Enter the following command to install curl: ``` sudo apt -y install curl ``` 2. Install [jq](https://stedolan.github.io/jq/), a command-line JSON processor. This utility is used to parse JSON, and is useful in parsing the response when listing repositories and tags of a connected registry. Enter following command to install jq: ``` sudo apt -y install jq ``` 3. Install `base64`, a command-line tool that is used to base64 encode client token credentials when viewing repositories and tags of a connected registry. Enter following command to install base64: ``` sudo apt -y install coreutils ``` ## View the repositories in a deployed connected registry Run the following commands from a machine with access to the deployed connected registry. 1. 
Base64 encode your client token credentials and store this value in a variable. ``` ENCODED_CREDENTIALS=$(echo -n 'myconnectedregistry-client-token:' | base64) ``` 2. The following command can be referenced to acquire a token to list repositories. ``` curl --location \ --request GET 'https://:/acr/oauth2/token?service=:&scope=registry:catalog:*' \ --header 'Authorization: Basic ' ``` In this example, the connected registry endpoint is accessible over HTTP on localhost:8080. The following command fetches the access token from the connected registry and stores it in a variable. ``` ACCESS_TOKEN=$(curl --location \ --request GET 'http://localhost:8080/acr/oauth2/token?service=localhost:8080&scope=registry:catalog:*' \ --header 'Authorization: Basic '$ENCODED_CREDENTIALS | jq -r '.access_token') ``` 3. Using the above token, run the following command to list the repositories available on the connected registry. ``` curl --location \ --request GET 'http://localhost:8080/v2/_catalog' \ --header 'Authorization: Bearer '$ACCESS_TOKEN | jq '.' ``` You will see an output similar to: ```json { "repositories": [ "hello-world", "testing" ] } ``` > [!IMPORTANT] > Only those repositories that the client token has access to will be listed if they are available on the connected registry. To give the client token read access to additional repositories, use the [az acr token update][az-acr-token-update] command. ## View the tags for a repository in a deployed connected registry Run the following commands from a machine with access to the deployed connected registry. 1. Base64 encode your client token credentials and store this value in a variable. ``` ENCODED_CREDENTIALS=$(echo -n 'myconnectedregistry-client-token:' | base64) ``` 2. The following command can be referenced to acquire a token to view tags for a repository. 
``` curl --location \ --request GET 'https://:/acr/oauth2/token?service=:&scope=repository::pull' \ --header 'Authorization: Basic ' ``` In this example, we will fetch tags for the "hello-world" repository. The connected registry endpoint is accessible over HTTP on localhost:8080. The following command fetches the access token from the connected registry and stores it in a variable. ``` ACCESS_TOKEN=$(curl --location \ --request GET 'http://localhost:8080/acr/oauth2/token?service=localhost:8080&scope=repository:hello-world:pull' \ --header 'Authorization: Basic '$ENCODED_CREDENTIALS | jq -r '.access_token') ``` 3. Using the above token, run the following command to list the tags in the hello-world repository on the connected registry. ``` curl --location \ --request GET 'http://localhost:8080/v2/hello-world/tags/list' \ --header 'Authorization: Bearer '$ACCESS_TOKEN | jq '.' ``` You will see an output similar to: ```json { "name": "hello-world", "tags": [ "0.1.0", "0.2.0", "0.3.0" ] } ``` [az-acr-token-update]: https://docs.microsoft.com/cli/azure/acr/token?view=azure-cli-latest#az_acr_token_update [container-registry-intro]: https://docs.microsoft.com/azure/container-registry/ [quickstart-connected-registry-cli]: quickstart-connected-registry-cli.md ================================================ FILE: docs/preview/connected-registry/release-notes.md ================================================ # Release Notes Release notes for the Azure Container Registry connected registry runtime image. The image is published at `mcr.microsoft.com/acr/connected-registry:`. ## 0.9.0 February 23, 2023 * Update runtime image to .NET 6 tags: `0.9.0`, `0.9.0-linux-amd64`, `0.9.0-linux-arm32v7`, `0.9.0-linux-arm64v8` ## 0.8.0 July 29, 2022 * Bug fix for memory leak on connected registry instance. * Bug fix to include Docker api version header in proxied responses. 
tags: `0.8.0`, `0.8.0-linux-amd64`, `0.8.0-linux-arm32v7`, `0.8.0-linux-arm64v8` ## 0.7.0 January 21, 2022 * Bug fix for auth issue during the activation caused by mixed-case connected registry name. * Increase the per request http client timeout used during sync from 5s to 10s. Useful for slow network scenarios. * Retry gateway API requests during sync in case of timeout. Useful for slow network scenarios. tags: `0.7.0`, `0.7.0-linux-amd64`, `0.7.0-linux-arm32v7`, `0.7.0-linux-arm64v8` ## 0.6.0 November 16, 2021 * Enable artifact push/delete notifications from the connected registry to the parent ACR. * Bug fix to ensure only once instance of `PartitionMessageFeed` is running. This was causing incorrect message sequence numbers during notification to parent. tags: `0.6.0`, `0.6.0-linux-amd64`, `0.6.0-linux-arm32v7`, `0.6.0-linux-arm64v8` ## 0.5.0 October 28, 2021 * Support for `ReadWrite` and `ReadOnly` connected registry mode types. * Support sync of OCI artifacts. * Bug fix for syncing 0 byte layer. * Bug fix where connection string could not be parsed if it started with the '=' char. tags: `0.5.0`, `0.5.0-linux-amd64`, `0.5.0-linux-arm32v7`, `0.5.0-linux-arm64v8` ## 0.3.0 Jun 9, 2021 * Support connected registry recovery in case of a missed sync iteration. * Support the `ACR_PARENT_GATEWAY_ENDPOINT` environment variable. * If synced repositories are removed from the cloud connected registry settings, only clean from the local store if the connected registry is in `Mirror` mode. * Log bearer authentication challenges as `Debug` level. * Bug fix for continuation token issue when fetching tags. * Bug fix to avoid duplicate configuration event processing. * Bug fix to merge the allowed during authentication to the connected registry. This is required when using containerd to authenticate to the connected registry. 
tags: `0.3.0`, `0.3.0-linux-amd64`, `0.3.0-linux-arm32v7`, `0.3.0-linux-arm64v8` ## 0.2.0 March 30, 2021 * Support hierarchical deployment of connected registries. * Support connection string configuration during connected registry installation. * Make `ACR_REGISTRY_LOGIN_SERVER` an optional environment variable. * Bug fix to ensure that a scheduled sync iteration is cancelled after surpassing the sync window. tags: `0.2.0`, `0.2.0-linux-amd64`, `0.2.0-linux-arm32v7`, `0.2.0-linux-arm64v8` ## 0.1.0 January 28, 2021 * Initial release of connected registry feature. * Support for syncing single-level connected registry with the parent Azure Container Registry. tags: `0.1.0`, `0.1.0-linux-amd64`, `0.1.0-linux-arm32v7`, `0.1.0-linux-arm64v8` ================================================ FILE: docs/preview/connected-registry/troubleshooting.md ================================================ --- title: Troubleshoot issues with connected registry description: Symptoms, causes, and resolution of common problems when setting up, configuring, and deploying connected registries ms.topic: article ms.date: 01/27/2021 ms.author: memladen author: toddysm --- # Troubleshoot issues with connected registry This article helps you troubleshoot problems you might encounter when setting up, configuring, and deploying a connected registry. ## Symptoms * Unable to activate the connected registry. The connected registry container fails to start up due to the error `"Failed to activate the connected registry as it is already activated by another instance. Only one instance is supported at any time."` ## Causes * There is already another instance of this connected registry deployed. There can only be one instance of a connected registry deployed at once. 
## Potential solutions ### Deactivate the existing connected registry instance If you would like to deactivate the existing instance of the connected registry, run the `az acr connected-registry deactivate` command using the [Azure CLI](https://learn.microsoft.com/cli/azure/acr/connected-registry?view=azure-cli-latest#az-acr-connected-registry-deactivate). Then redeploy the connected registry. ### Create a new connected registry with a different name If you would like to preserve the existing instance of this connected registry, create a new connected registry with a different name to avoid the activation conflict. Run the `az acr connected-registry create` command using the [Azure CLI](https://learn.microsoft.com/cli/azure/acr/connected-registry?view=azure-cli-latest#az-acr-connected-registry-create) and deploy again using this new connected registry. ## Symptoms * Unable to push or pull images to or from the connected registry. Client error is `Error response from daemon: Get https:///v2/: http: server gave HTTP response to HTTPS client` ## Causes * The connected registry is configured for HTTP access only - [solution](#configure-docker-daemon-to-access-insecure-registry) ## Potential solutions ### Configure Docker daemon to access insecure registry The access the connected registry via HTTP, you must configure the client Docker daemon to allow access to insecure registries. The steps are described in [Test an insecure registry](https://docs.docker.com/registry/insecure/) article on Docker's web site. ## Symptoms * Unable to pull an image from the connected registry. Client error is `Error response from daemon: manifest for /: not found: manifest unknown: manifest unknown` ## Causes * The connected registry is not configured to sync this repository from the Azure Container Registry. 
## Potential solutions ### Configure the connected registry to sync the repository In order to access this image, you must update the connected registry configuration to sync the repository. From the Azure CLI run `az acr connected-registry repo -r -n --add ` Wait a few minutes for the connected registry to sync the repository and try pulling the image again. ## Symptoms * Unable to push or pull images to or from the connected registry. Client error is `Error response from daemon: pull access denied for /, repository does not exist or may require 'docker login': denied: Insufficient scopes to perform the operation` ## Causes * The repository is synced to the connected registry, but the client token used for `docker login` does not have access. ## Potential solutions ### Assign permissions to the connected registry client token To update the permissions of the client token, you must update the corresponding scope map. To view the scope map resource ID associated with a token, run the following from the Azure CLI: `az acr token show -r -n -o tsv --query scopeMapId` #### Pull permissions To give the client token pull permissions to the repository, run the following from the Azure CLI: ``` az acr scope-map update \ --name \ --registry \ --add-repository content/read ``` #### Push permissions If the connected registry is in Registry mode, the client may need push access. To give the client token push permissions to the repository, run the following from the Azure CLI: ``` az acr scope-map update \ --name \ --registry \ --add-repository content/read content/write ``` Wait a few minutes for the updated client token permissions to sync to the connected registry. > [!TIP] > After updating the permissions of the client token, you may want to generate new passwords. Run `az acr token credential generate` from the Azure CLI to refresh your client token passwords. Allow a few minutes for the credentials to sync to the connected registry. 
Login using your new credentials with `docker login`. For more information on ACR token management please reference [Create a token with repository-scoped permissions](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-repository-scoped-permissions]). ## Symptoms * Unable to push an image to the connected registry. Client error is `denied: This operation is not allowed on this registry.` ## Causes * Images can only be pushed to a connected registry in `Registry` mode. If the connected registry is in `Mirror` mode then only readonly operations are allowed, such as `docker pull`. ## Potential solutions ### Create a new connected registry in Registry mode Once a connected registry is created, the mode cannot be changed. If you would like to push images to your connected registry, create a new resource in Registry mode. Ensure the client token linked to the new connected registry has push permissions to the synced repositories. From the Azure CLI run `az acr connected-registry create --registry --name --repository app/hello-world service/mycomponent --client-tokens ` Deploy the connected registry. For an example on how to deploy a connected registry on IoT Edge, please reference [Quickstart - Deploy a connected registry to an IoT Edge device](./quickstart-deploy-connected-registry-iot-edge-cli.md). Once deployed, you can use the client token to login and push images to the connected registry. These images will be synced from the connected registry to the ACR. ================================================ FILE: docs/preview/continuous-patching/README.md ================================================ Continuous Patching Workflow in Azure Container Registry ======================================================== ## Introduction Azure Container Registry's Continuous Patching feature automates the detection and remediation of operating system(OS) level vulnerabilities in container images. 
By scheduling regular scans with [Trivy](https://trivy.dev/) and applying security fixes using [Copa](https://project-copacetic.github.io/copacetic/website/), you can maintain secure, up-to-date images in your registry—without requiring access to source code or build pipelines. Simply customize the schedule and target images to keep your Azure Container Registry(ACR) environment safe and compliant > [!NOTE] > Continuous Patching is in Private Preview as of October 2024. ## Use Cases Here are a few scenarios to use Continuous Patching: - **Enforcing container security and hygiene:** Continuous Patching enables users to quickly fix OS container CVEs without the need to fully rebuild from upstream. - **Speed of Use:** Continuous Patching removes the dependency on upstream updates for specific images by updating packages automatically. Vulnerabilities can appear every day, while popular image publishers may only offer a new release once a month. With Continuous Patching, you can ensure that container images within your registry are patched as soon as the newest set of OS vulnerabilities are detected. ## Preview Limitations Continuous Patching is currently in private preview. The following limitations apply: - Windows-based container images aren't supported. - Only "OS-level" vulnerabilities that originate from system packages will be patched. This includes system packages in the container image managed by an OS package manager such as "apt” and "yum”. Vulnerabilities that originate from application packages, such as packages used by programming languages like Go, Python, and NodeJS are unable to be patched. - End of Service Life (EOSL) images are not supported by Continuous Patching. EOSL images refer to images where the underlying operating system is no longer offering updates, security patches, and technical support. Examples include images based on older operating system versions such as Debian 8 and Fedora 28. 
EOSL images will be skipped from the patch despite having vulnerabilities - the recommended approach is to upgrade your the underlying operating system of your image to a supported version. - Subscriptions using "Free" Azure Credits are not supported since ACR Tasks are not supported for these subscriptions. ## Prerequisites - You can use the Azure Cloud Shell or a local installation of the Azure CLI with a minimum version of 2.15.0 or later. - You have an existing Resource Group with an Azure Container Registry. - You have an Azure Container Registry with Tasks enabled. (Tasks is not supported in the free tier of ACR) ## Key Concepts Because Continuous Patching in ACR creates a new image per patch, ACR relies on a tag convention to version and identify patched images. The two main approaches are incremental and floating. ### Incremental Tagging How It Works Each new patch increments a numerical suffix (e.g., ```-1```, ```-2```, etc.) on the original tag. For instance, if the base image is python:3.11, the first patch creates ```python:3.11-1```, and a second patch on that same base tag creates ```python:3.11-2```. Special Suffix Rules - ```-1``` to ```-999```: These are considered patch tags. - ```-x``` where ```x > 999```: These are not interpreted as patch tags; instead, that entire suffix is treated as part of the original tag. (Example: ```ubuntu:jammy-20240530``` is considered an original tag, not a patched one.) This means if you push a new tag ending in ```-1``` to ```-999``` by accident, Continuous Patching will treat it like a patched image. We recommend you to avoid pushing tags that you want patched with the suffix ```-1``` to ```-999```. If ```-999``` versions of a patched image is hit, Continuous Patching will return an error. ### Floating Tagging How it works A single mutable tag, ```-patched```, will always reference the latest patched version of your image. 
For instance, if your base image tag is ```python:3.11```, the first patch creates ```python:3.11-patched```. With each subsequent patche, the ```-patched``` tag will automatically update to point to the most recent patched version. ![PatchingTimelineExample](./media/patching_timeline_example1.png) ### Which Should I Use? Incremental (**default**): Great for environments where auditability and rollbacks are critical, since each new patch is clearly identified with an unique tag. Floating: Ideal if you prefer a single pointer to the latest patch for your CI/CD pipelines. Reduces complexity by removing the need to update references in downstream applications per patch, but sacrifices strict versioning, making it difficult to rollback. ## Installing the Continuous Patching Workflow Run the following command to install the CLI extension: ```sh az extension add --source aka.ms/acr/patching/wheel ``` ## Enable the Continuous Patching Workflow To enable Continuous Patching, follow the series of steps below that outline the CLI process. These guidelines detail the lifecycle of a continuous patching workflow, from its creation to subsequent updates to eventual deletion. 1. Login to Azure CLI with az login ```sh az login ``` 2. Login to ACR ```sh az acr login -n ``` 3. Run the following command to create a file named ```continuouspatching.json```, which contains the Continuous Patching JSON. ```sh cat < continuouspatching.json { "version": "v1", "tag-convention" : "", "repositories": [{ "repository": "", "tags": [""], "enabled": }] } EOF ``` The schema ingests specific repositories and tags in an array format. Each variable is defined below: - ```version``` allows the ACR team to track what schema version you're on. Do not change this variable unless instructed to. - ```tag-convention``` this is an optional field. Allowed values are "incremental" or "floating" - refer to [Key Concepts](#key-concepts) for more information. 
- ```repositories``` is an array that consists of all objects that detail repository and tag information - ```repository``` refers to repository name - ```tags``` is an array of tags separated by commas. The wildcard ```*``` can be used to signify all tags within that repository - ```enabled``` is a Boolean value of true or false determining if the specified repo is on or off The following details an example configuration for a customer who wants to patch all tags (use the * symbol) within the repository ```python```, and to patch specifically the ```jammy-20240111``` and ```jammy-20240125``` tags in the repository ```ubuntu```. JSON example: ```json { "version": "v1", "tag-convention" : "incremental", "repositories": [{ "repository": "python", "tags": ["*"], "enabled": true }, { "repository": "ubuntu", "tags": ["jammy-20240111", "jammy-20240125"], "enabled": true, }] } ``` 4. After creating your configuration file, it is recommended to execute a dry run to verify the intended artifacts are selected by the JSON criteria. The dry run requires a parameter called ```schedule```, which specifies how often your continuous patching cycle will run. The schedule flag is measured in days, with a minimum value of 1 day, and a maximum value of 30 days. For example, if you want an image to be patched everyday, you would specify schedule as ```1d```, or 1 day. If you want a weekly patch (once a week), you would fill schedule as ```7d```, or 7 days. Command Schema: ```sh az acr supply-chain workflow create -r -g -t continuouspatchv1 --config --schedule --dry-run ``` Example Command: ```sh az acr supply-chain workflow create -r myRegistry -g myResourceGroup -t continuouspatchv1 --config ./continuouspatching.json --schedule 1d --dry-run ``` The ```--dry-run``` flag will output all specified artifacts by the JSON file configuration. Customers can verify that the right artifacts are selected. 
With the sample ubuntu configuration above, the following results should be displayed as output. ```sh Ubuntu: jammy-20240111 Ubuntu: jammy-20240125 ``` Help command to see all required/optional flags. ```sh az acr supply-chain workflow create --help ``` 5. Once satisfied with the dry-run results, run the ```create``` command again without the ```--dry-run``` flag to officially create your continuous patching workflow. **Important** The ```--schedule``` parameter follows a fixed-day multiplier starting from day 1 of the month. This means: - If you specify ```--schedule 7d``` and run the command on the 3rd, the next scheduled run will be on the 7th—because 7 is the first multiple of 7 (days) after the 3rd, counting from day 1 of the month. - If ```--schedule``` is 3d and today is the 7th, then the next scheduled run lands on the 9th—since 9 is the next multiple of 3 that follows 7. - If you add the flag ```--run-immediately```, you trigger an immediate patch run. The subsequent scheduled run will still be aligned to the nearest day multiple from the 1st of the month, based on your ```--schedule``` value. - The schedule counter **resets** every month. Regardless of the designated schedule, your workflow will run on the 1st of every month, then follow the specified schedule value for the remainder of the month. If my patching runs on the 28th of January, and my schedule is 7d, my next patch will run on Feburary 1st, then 8th, and continue following the 7 days. Command Schema: ```sh az acr supply-chain workflow create -r -g -t continuouspatchv1 --config --schedule --run-immediately ``` Example Command: ```sh az acr supply-chain workflow create -r myRegistry -g myResourceGroup -t continuouspatchv1 --config ./continuouspatching.json --schedule 1d --run-immediately ``` Upon a successful command (whether or not you include ```--run-immediately```), you will see: - A success message confirming that your workflow tasks have been queued. 
- An output parameter indicating when the next run of your workflow is scheduled, so you can track exactly when patching will occur again. Help command to see all required/optional flags. ```sh az acr supply-chain workflow create --help ``` ## Use Azure Portal to view workflow tasks Once the workflow succeeds, go to the Azure Portal to view your running tasks. Click into Services -> Repositories, and you should see a new repository named ```csscpolicies/patchpolicy```. This repository hosts the JSON configuration artifact that will be continuously referenced for continuous patching. ![PortalRepos](./media/portal_repos1.png) Next, click on "Tasks” under "Services”. You should see 3 new tasks, named the following: ![PortalTasks](./media/portal_tasks1.png) - cssc-trigger-workflow - this task scans the configuration file and calls the scan task on each respective image. - cssc-scan-image - this task scans the image for operating system vulnerabilities. This task will only trigger the patching task only if (1) operating system vulnerabilities were found, and (2) the image is not considered End of Service Life (EOSL). For more information on EOSL, please consult [Preview Limitations](#preview-limitations). - cssc-patch-image - this task patches the image. These tasks work in conjunction to execute your continuous patching workflow. You can also click on "Runs” within the "Tasks” view to see specific task runs. Here you can view status information on whether the task succeeded or failed, along with viewing a debug log. ![PortalRun](./media/portal_runs1.png) ## Use CLI to view workflow tasks You can also run the following CLI show command to see more details on each task and the general workflow. The command will output - Schedule - Creation date - System data such as last modified date, by who, etc. 
Command Schema ```sh az acr supply-chain workflow show -r -g -t continuouspatchv1 ``` Example Command ```sh az acr supply-chain workflow show -r myRegistry -g myResourceGroup -t continuouspatchv1 ``` Help command to see all required/optional flags ```sh az acr supply-chain workflow show --help ``` ## Updating the Continuous Patching Workflow In scenarios where you want to make edits to your continuous patching workflow, the update command is the easiest way to do so. You can update your schedule or JSON config schema with the update CLI command directly. Command Schema ```sh az acr supply-chain workflow update -r -g -t continuouspatchv1 --config --schedule ``` Example Command ```sh az acr supply-chain workflow update -r myRegistry -g myResourceGroup -t continuouspatchv1 --config ./continuouspatching.json --schedule 1d ``` Help command to see all required/optional flags ```sh az acr supply-chain workflow update --help ``` To update your schedule, run the previous command with a new input for schedule. To update your JSON configuration, we recommend making changes to the file, running a dry-run, and running the update command. You can verify the updated workflow configuration by running the following show command or by clicking into your registry portal view. ```sh az acr supply-chain workflow show -r myregistry -g myresourcegroup -t continuouspatchv1 ``` ## Deleting the Continuous Patching Workflow To delete the continuous patching workflow, please run the following CLI command Command Schema ```sh az acr supply-chain workflow delete -r -g -t continuouspatchv1 ``` Example Command ```sh az acr supply-chain workflow delete -r myregistry -g myresourcegroup -t continuouspatchv1 ``` Help command to see all required/optional flags ```sh az acr supply-chain workflow delete --help ``` Once a workflow is successfully deleted, the repository "csscpolicies/patchpolicy” will be automatically deleted. 
The 3 tasks that run your workflow will also be automatically deleted, along with any currently queued runs and previous logs. ## Listing Running Tasks To list the most recently executed Continuous Patching tasks, the following List command is available: ```sh az acr supply-chain workflow list -r -g [--run-status ] -t continuouspatchv1 ``` A successful result will return the following information: - Image name and tag - Workflow type - Scan status - Last scan date and time (if status failed, date would be left blank) - Scan task ID (for further debugging) - Patch Status - Last patch date and time (if status failed, date would be left blank) - Patched image name + tag - Patch task ID (for further debugging) Example ```sh ubuntu:jammy-20240111 scan status: successful scan date: 2024-07-02T14:02:00 scan task ID: abc patch status: successful patch date: 2024-07-02T14:04:00 patch task id: def patched image: ubuntu:jammy-20240111-1 workflow type: continuouspatchv1 ``` The [--run-status] will return all tasks statuses that match the specified filter. This CLI command provides important debugging information. For example, If the "failed" value is specified under run-status, only images which have failed their patching will be listed. See Appendix for a full list of possible outputs. ## Canceling Running Tasks Certain scenarios may require you to cancel tasks which are currently running or waiting to run. For this purpose, please run the following CLI command: ```sh az acr supply-chain workflow cancel-run -r -g --type ``` This command will cancel all Continuous Patching tasks within the registry with a status of "Running”, "Queued” and "Started”. The command will output a success or failure. Failure results will follow the failure pattern of the other workflow commands if the input is incorrect. Running the cancel command will only affect tasks in the current schedule. 
For example, if a user has their schedule for 1d, and runs the cancel command, tasks in those 3 states will be canceled for that day, but will be requeued for the next day. If the schedule was a week, then that week's tasks would be canceled, but the following week would have the tasks requeued. The main scenario for this command is when a user misconfigures their continuous patching workflow and doesn't want to wait for all tasks to finish running. ## Troubleshooting Tips Use the task list command to output all failed tasks. Specifying the "cssc-patch” command is best for failure. The documentation on the task-list [command](https://learn.microsoft.com/en-us/cli/azure/acr/task?view=azure-cli-latest#az-acr-task-list-runs) is here. Task-list command for top 10 failed patch tasks ```sh az acr task list-runs -r registryname -n cssc-patch-image --run-status Failed --top 10 ``` This command will output all failed tasks. To investigate a specific failure, grab the runID that's outputted from this command and run ```sh az acr task logs -r registryname --run-id ``` If the logs aren't sufficient, or an issue is persistent, or for any feedback, please email the ACR team at acr-patching-preview@microsoft.com ## Appendix **Possible CLI Outputs for 'List' Command** ```sh az acr supply-chain workflow list -r -g [--run-status ] ``` If scan and patch are successful ```sh image: import:dotnetapp-manual scan status: Succeeded scan date: 2024-09-13 21:05:58.841962+00:00 scan task ID: dt21 patch status: Succeeded patch date: 2024-09-13 21:07:32.841962+00:00 patch task ID: xyz2 last patched image: import:dotnetapp-manual-patched workflow type: continuouspatchv1 ``` If scan is successful but patch isn't (with a previous patched image available) ```sh image: import:dotnetapp-manual scan status: Succeeded scan date: 2024-09-13 21:05:58.841962+00:00 scan task ID: dt21 patch status: Failed patch date: 2024-09-13 21:07:32.841962+00:00 patch task ID: xyz2 last patched image: 
import:dotnetapp-manual-patched workflow type: continuouspatchv1 ``` If scan is successful but patch isn't (with NO previous patched image available) ```sh image: import:dotnetapp-manual scan status: Succeeded scan date: 2024-09-13 21:05:58.841962+00:00 scan task ID: dt21 patch status: Failed patch date: 2024-09-13 21:07:32.841962+00:00 patch task ID: xyz2 last patched image: ---No patch image available--- workflow type: continuouspatchv1 ``` If scan is successful and no patch is needed (no OS vulnerabilities found) ```sh image: import:dotnetapp-manual scan status: Succeeded scan date: 2024-09-13 21:05:58.841962+00:00 scan task ID: dt21 patch status: Skipped skipped patch reason: no vulnerability found in the image import:dotnetapp-manual image: patch date: ---Not Available--- patch task ID: ---Not Available--- last patched image: import:dotnetapp-manual-patched workflow type: continuouspatchv1 ``` if scan is successful and no patch is needed and NO patched image exists yet ```sh image: import:dotnetapp-manual scan status: Succeeded scan date: 2024-09-13 21:05:58.841962+00:00 scan task ID: dt21 patch status: Skipped skipped patch reason: no vulnerability found in the image import:dotnetapp-manual image: patch date: ---Not Available--- patch task ID: ---Not Available--- last patched image: ---Not Available--- workflow type: continuouspatchv1 ``` If scan is a failure and a patched image exists ```sh image: import:dotnetapp-manual scan status: Failed scan date: 2024-09-13 21:05:58.841962+00:00 scan task ID: dt21 patch status: ---Not Available--- patch date: ---Not Available--- patch task ID: ---Not Available--- last patched image: import:dotnetapp-manual-patched workflow type: continuouspatchv1 ``` If scan is a failure and NO previous patched image exists ```sh image: import:dotnetapp-manual scan status: Failed scan date: 2024-09-13 21:05:58.841962+00:00 scan task ID: dt21 patch status: ---Not Available--- patch date: ---Not Available--- patch task ID: ---Not 
Available--- last patched image: ---Not Available--- workflow type: continuouspatchv1 ``` If scan is currently running and a patched image exists ```sh image: import:dotnetapp-manual scan status: Running scan date: 2024-09-13 21:05:58.841962+00:00 scan task ID: dt21 patch status: ---Not Available--- patch date: ---Not Available--- patch task ID: ---Not Available--- last patched image: import:dotnetapp-manual-patched workflow type: continuouspatchv1 ``` If scan is currently running and NO patched image exists ```sh image: import:dotnetapp-manual scan status: Running scan date: 2024-09-13 21:05:58.841962+00:00 scan task ID: dt21 patch status: ---Not Available--- patch date: ---Not Available--- patch task ID: ---Not Available--- last patched image: ---Not Available--- workflow type: continuouspatchv1 ``` If patch is currently running and a patched image exists ```sh image: import:dotnetapp-manual scan status: Succeeded scan date: 2024-09-13 21:05:58.841962+00:00 scan task ID: dt21 patch status: Running patch date: 2024-09-13 21:07:32.841962+00:00 patch task ID: xyz2 last patched image: import:dotnetapp-manual-patched workflow type: continuouspatchv1 ``` If patch is currently running and NO patched image exists ```sh image: import:dotnetapp-manual scan status: Succeeded scan date: 2024-09-13 21:05:58.841962+00:00 scan task ID: dt21 patch status: Running patch date: 2024-09-13 21:07:32.841962+00:00 patch task ID: xyz2 last patched image: ---Not Available--- workflow type: continuouspatchv1 ``` ================================================ FILE: docs/preview/quarantine/quarantine-details/example.json ================================================ { "scanner": "SecurityCenter", "state": "ScanState", "link": "https://testresult/summary", "result": { "version": "0.0.1", "summary": [ { "severity": "Critical", "count": 10 }, { "severity": "High", "count": 10 }, { "severity": "Low", "count": 10 } ] } } ================================================ FILE: 
docs/preview/quarantine/quarantine-details/schema.json ================================================ { "$schema": "http://json-schema.org/draft-04/schema#", "title": "Azure Container Registry Quarantine Details object", "type": "object", "additionalProperties": false, "properties": { "scanner": { "description": "the name of the scanner", "type": "string" }, "state": { "description": "state of the scan result", "type": "string" }, "result": { "description": "summary of the scan result", "$ref": "#/definitions/summary" }, "link": { "description": "link to the scan report", "type": "string" } }, "definitions": { "summary": { "type": "object", "properties": { "version": { "$id": "/properties/version", "type": "string", "title": "Scan Summary Version", "default": "", "examples": [ "0.0.1" ] }, "summary": { "$id": "/properties/summary", "type": "array", "maxItems": 5, "minItems": 0, "items": { "type": "object", "properties": { "severity": { "$id": "/properties/summary/items/properties/severity", "title": "The Severity Schema", "default": "", "examples": [ "Critical" ], "enum": [ "Critical", "High", "Medium", "Low", "None" ] }, "count": { "$id": "/properties/summary/items/properties/count", "type": "integer", "title": "count", "default": 0, "examples": [ 10 ] } } } }, "description": { "$id": "/properties/description", "type": "string", "title": "Description", "default": "", "maxLength": 1024 } }, "required": [ "version", "summary" ] } } } ================================================ FILE: docs/preview/quarantine/readme.md ================================================ # Quarantine Pattern To assure a registry only contains images that have been vulnerability scanned, ACR introduces the Quarantine pattern. When a registries policy is set to Quarantine Enabled, all images pushed to that registry are put in quarantine by default. Only after the image has been verifed, and the quarantine flag removed may a subsequent pull be completed. 
> Note: This is an early preview of this workflow. Additional capabilities will be added, including CLI and Portal support. # Current Workflow ## Quarantined Webhook Notification No matter if you have quarantine flow enabled or not, scanner can always subscribe to the "quarantine" webhook. when an image is pushed, we will try to notify the matching "quarantine" webhook, with payload as below: ```json { "id": "0d799b14-404b-4859-b2f6-50c5ee2a2c3a", "timestamp": "2018-02-28T00:42:54.4509516Z", "action": "quarantine", "target": { "size": 1791, "digest": "sha256:91ef6", "length": 1791, "repository": "helloworld", "tag": "1"}, "request": { "id": "978fc988-zzz-yyyy-xxxx-4f6e331d1591", "host": "[registry].azurecr.io", "method": "PUT"} } ``` You can use our [management Webhook API](https://docs.microsoft.com/en-us/rest/api/containerregistry/webhooks/create) to create and subscribe for the "quarantine" webhook (the actions field need to use the "quarantine" action). >>In order to call the above management API, you need to get an access token which is be used as the Authorization header . Here is an [example](https://blogs.technet.microsoft.com/stefan_stranger/2016/10/21/using-the-azure-arm-rest-apin-get-access-token/) on how to get access token for a Service Principal: >> ``` curl --request POST "https://login.windows.net/{TenantId}/oauth2/token" --data-urlencode "resource=https://management.core.windows.net" --data-urlencode "client_id={appId}" --data-urlencode "grant_type=client_credentials" --data-urlencode "client_secret={app secret}" ``` Before Quarantine is configured on the registry, both "quarantine" and "push" webhook will be raised for each image push. The scanner can subscribe to "quarantine" webhook and conduct security scan for the newly pushed image; while normal user can subscribe to the normal "push" webhook and pull the image successfully. 
## Configure Quarantine on a registry Once a user decides to enable or disable Quarantine on a registry, they can use our [management Policy API](https://docs.microsoft.com/en-us/rest/api/containerregistry/registries/update#policies). Here is the CLI example. ``` id=$(az acr show --name myregistry --query id -o tsv) az resource update --ids $id --set properties.policies.quarantinePolicy.status=enabled az resource update --ids $id --set properties.policies.quarantinePolicy.status=disabled ``` Once Quarantine is enabled on a registry, newly pushed images will enter the quarantine state automatically and only a user with quarantine reader permissions can see the image. Meanwhile, the same "quarantine" webhook will be raised, but no "push" notification anymore. This gives the scanner a chance to scan the image first before making it available to other users. Once the scanner finishes scanning the image, it can mark the image as good, which will make this image available to all other users. Meanwhile a "push" notification is generated so that other users are notified. >Please note, once Quarantine is enabled, any images not marked as good will be blocked for pull. This may impact a user's ongoing workflow. We would recommend that before enabling Quarantine mode on the registry, the scanner should finish scanning all the existing images (this can be done by using the catalog API and manifest list API). Users can then look at the failed images and decide if they should enable the Quarantine mode. The detailed flow is described below. 
## Name your registry ```bash export ACR_NAME=quarantine export REGISTRY_NAME=$ACR_NAME.azurecr.io ``` ## Log in to the registry `docker login $REGISTRY_NAME` ## Push an image ``` docker push ${REGISTRY_NAME}/helloworld:1 96c922e98de8: Pushed digest: sha256:80f0d5cxxxxXxxXxxxxece0db56d11cdc624ad20da9fe62d7d size: 524 ``` The image is now quarantined. ## Attempt to pull the quarantined image ``` docker pull ${REGISTRY_NAME}/helloworld:1 Error response from daemon: manifest for quarantinetest1.azurecr.io/helloworld:1 not found ``` ## Attempt to pull the image by its digest ``` docker pull ${REGISTRY_NAME}/helloworld@sha256:80f0d5cxxxxXxxXxxxxece0db56d11cdc624ad20da9fe62d7d Error response from daemon: unknown: The operation is disallowed. ``` ## Quarantined Webhook Notification Once the image is pushed, you will receive a notification through webhooks. ## Pull the quarantined image Once the image is quarantined, you will need a user with the **AcrQuarantineReader** role. The presumption here is the Vulnerability Scanning solution is configured to use this account. `docker login ${REGISTRY_NAME} -u` **[quarantinedServicePrincipalUsr]**` -p `**[quarantinedServicePrincipalPwd]** Now the user can pull the quarantined image by digest ``` docker pull [registry].azurecr.io/helloworld@sha256:80f0d5cxxxxXxxXxxxxece0db56d11cdc624ad20da9fe62d7d Pulling from helloworld Digest: sha256:80f0d5cxxxxXxxXxxxxece0db56d11cdc624ad20da9fe62d7d Status: Image is up to date for [registry].azurecr.io/helloworld@sha256:80f0d5cxxxxXxxXxxxxece0db56d11cdc624ad20da9fe62d7d ``` ## Query Attributes of an Image (manifest) Querying ACR metadata via the REST API requires an OAuth Token. To query attributes via REST, use the following workflow: 1. encode the username/password with the required permissions 1. get an access token 1. 
query acr metadata for the attributes of a given digest ### Get an ACR access token for the user Please refer to [this document](https://github.com/Azure/acr/tree/master/docs/Token-BasicAuth.md) on how to get an access token. example: ``` GET https://quarantinetest1.azurecr.io/oauth2/token?service=quarantinetest1.azurecr.io&scope=repository:helloworld:pull ``` ### Query the metadata API - With an OAuth token, we can now query ACR metadata: REST format: `https://`**[login-url]**`/acr/v1/`**[image]**`/_manifests/`**[digest]** - Set the header for Authorization, setting the OAuth token |Header | Value | |-------|-------| | Authorization | Bearer [token] | | Host | [login-url] | example: ``` GET https://quarantinetest1.azurecr.io/acr/v1/mytest/_manifests/sha256:80f0d5cxxxxXxxXxxxxece0db56d11cdc624ad20da9fe62d7d ``` ## Remove the Quarantine Flag Once a scan completes, a user with the **AcrQuarantineWriter** role can update the manifest attribute to remove the quarantined flag. ### Get an ACR Push access token for the user Please refer to [this document](https://github.com/Azure/acr/tree/master/docs/Token-BasicAuth.md) on how to get an access token. example: ``` GET https://quarantinetest1.azurecr.io/oauth2/token?service=quarantinetest1.azurecr.io&scope=repository:helloworld:pull,push ``` ### Remove the Quarantine Flag - Update manifest attributes using the access token. 
REST format: `PATCH https://`**[login-url]**/`acr/v1/`**[image]**`/_manifests/`**[digest]** Payload: ```json { "quarantineState": "[Passed|Failed]", "quarantineDetails": "[json string of detailed results]" } ``` > Note: the quarantineDetails schema is defined [here](https://github.com/Azure/acr/tree/master/docs/preview/quarantine/quarantine-details/schema.json) |Header | Value | |-------|-------| | Authorization | Bearer [token] | | Host | [login-url] | example: PATCH https://quarantinetest1.azurecr.io/acr/v1/mytest/_manifests/sha256:80f0d5c8786bb9e621a45ece0db56d11cdc624ad20da9fe62e9d25490f331d7d HTTP/1.1 ```json { "quarantineState": "Passed", "quarantineDetails": "{\"state\":\"scan passed\",\"link\":\"http://test.io/test\"}" } ``` ## Image Pushed Webhook Notification Based on the registry policy, once the image has been set to **Passed**, an Image Pushed webhook will be triggered ## Image Pull With the quarantine removed, a user with the standard **reader** role can pull the image, using the tag ``` docker login ${REGISTRY_NAME} docker pull ${REGISTRY_NAME}/helloworld:1 ``` ================================================ FILE: docs/preview/regional-endpoints/regional-endpoints.md ================================================ --- title: Regional endpoints for geo-replicated registries (Preview) description: Learn how to use regional endpoints to target specific geo-replicas in Azure Container Registry for predictable routing and client-side failover. ms.topic: how-to ms.date: "2026-03-02" ms.author: johsh ms.service: azure-container-registry --- ## Regional endpoints for geo-replicated registries (Preview) Azure Container Registry regional endpoints allow you to target specific geo-replicas directly, bypassing Azure-managed routing. This feature is useful when you need predictable routing, client-side failover, or regional affinity for your container registry operations. > [!IMPORTANT] > Regional endpoints are currently in **private preview**. 
To enable the preview, see [Enroll in the preview](#enroll-in-the-preview). ## About regional endpoints When you use a geo-replicated registry's global endpoint (`myregistry.azurecr.io`), Azure automatically routes requests to the most suitable replica based on network performance. While this works well for most scenarios, it doesn't provide explicit control over which replica handles your requests. Regional endpoints solve this by providing dedicated login server URLs for each geo-replica. > [!IMPORTANT] > **Clarification: `--regional-endpoints` vs `--region-endpoint-enabled`** > > These two settings have similar names but serve different purposes: > > | Setting | Scope | Purpose | > |---------|-------|---------| > | `--regional-endpoints` | Registry-level | Enables dedicated regional endpoint URLs (`myregistry..geo.azurecr.io`) for all geo-replicas. This is the feature documented on this page. | > | `--region-endpoint-enabled` | Per-geo-replica | Controls whether the **global endpoint** (`myregistry.azurecr.io`) routes traffic to a specific geo-replica. Set to `false` to temporarily exclude a geo-replica from global endpoint routing (for maintenance or troubleshooting). Data continues syncing regardless of this setting. See [Geo-replication in Azure Container Registry](https://learn.microsoft.com/azure/container-registry/container-registry-geo-replication). | > > **These settings are independent.** Setting `--region-endpoint-enabled false` on a geo-replica: > - Excludes that geo-replica from **global endpoint** routing only. > - Does **not** disable the geo-replica's **regional endpoint** URL. If `--regional-endpoints` is enabled at the registry level, clients can still directly access that geo-replica via the regional endpoint URL. > - Does **not** stop data syncing to that geo-replica. > > **In short:** > - Enable `--regional-endpoints` at the registry level to **enable dedicated regional URLs** (for all geo-replicas) for direct access to specific geo-replicas. 
> - Configure `--region-endpoint-enabled` (on a specific geo-replica) to **control global endpoint routing** to a specific geo-replica. Regional endpoints provide dedicated login server URLs for each geo-replica: ``` myregistry..geo.azurecr.io ``` For example: - `myregistry.eastus.geo.azurecr.io` - `myregistry.westeurope.geo.azurecr.io` ### When to use regional endpoints | Scenario | Description | |----------|-------------| | **Client-side failover** | Implement your own failover logic that explicitly switches between regions based on health checks. | | **Regional affinity** | Ensure specific applications always use a designated replica. | | **Troubleshooting** | Test or debug a specific regional replica. | | **Push/pull consistency** | Ensure images are pushed and pulled from the same replica. | ### Regional endpoints coexist with global endpoints Enabling regional endpoints doesn't disable or replace the global endpoint. You can use both simultaneously: - Use the **global endpoint** (`myregistry.azurecr.io`) for most operations with automatic routing. - Use **regional endpoints** when you need explicit regional control. ## Prerequisites - **Premium SKU** - Regional endpoints are available exclusively on Premium tier registries. - **Azure CLI** - Version 2.74.0 or later. - **Preview feature registration** - You must register the `RegionalEndpoints` feature flag. See [Enroll in the preview](#enroll-in-the-preview). - **API version** - Regional endpoints are available in all production regions in Azure Public Cloud via the `2026-01-01-preview` ACR ARM API version. > [!NOTE] > During private preview, regional endpoints are only available in Azure Public Cloud. Support for Azure Government, Azure China, and other national clouds will be available in public preview and beyond. > [!NOTE] > Regional endpoints can be enabled on any Premium SKU registry, even without geo-replication. 
A registry without geo-replication has a single geo-replica in the home region, which gets one regional endpoint URL. However, the feature is most useful when your registry has at least two geo-replicas. ## Enroll in the preview To enable the regional endpoints private preview, complete the following steps before using regional endpoints. ### 1. Register the feature flag Register the `RegionalEndpoints` feature flag for your subscription: ```azurecli az feature register \ --namespace Microsoft.ContainerRegistry \ --name RegionalEndpoints ``` The feature registration is auto-approved and takes approximately 1 hour to propagate. You can check the status with: ```azurecli az feature show \ --namespace Microsoft.ContainerRegistry \ --name RegionalEndpoints ``` Wait until the `state` shows **Registered** before proceeding. ### 2. Propagate the registration Once the feature registration has propagated, update your provider registration: ```azurecli az provider register -n Microsoft.ContainerRegistry ``` ### 3. Install the preview CLI extension Install the preview Azure CLI extension for regional endpoints: Download the preview Azure CLI extension wheel file from and install it: ```azurecli # Download the .whl file from the link above, then install: az extension add \ --source acrregionalendpoint-1.0.0b1-py3-none-any.whl \ --allow-preview true ``` ## Enable regional endpoints You can enable regional endpoints when creating a new registry or update an existing registry. **Create a new registry with regional endpoints enabled for all geo-replicas:** ```azurecli az acr create \ -n myregistry \ -g myrg \ -l regionname \ --sku Premium \ --regional-endpoints enabled ``` **Enable regional endpoints for all geo-replicas for an existing registry:** ```azurecli az acr update \ -n myregistry \ -g myrg \ --regional-endpoints enabled ``` --- Regional endpoints are enabled at the registry level and apply to every geo-replica. You can't enable regional endpoints for individual replicas. 
When you enable regional endpoints, Azure Container Registry automatically creates login server URLs for each of your geo-replicas. ### View all endpoints Use the `az acr show-endpoints` command to view all endpoints for your registry, including the global URL, regional endpoints (if enabled), and dedicated data endpoints (if enabled): ```azurecli az acr show-endpoints --name myregistry --resource-group myrg ``` This command displays: - The global login server URL (`myregistry.azurecr.io`) - Regional endpoint URLs for each geo-replica (if regional endpoints are enabled) - Dedicated data endpoint URLs for each geo-replica (if dedicated data endpoints are enabled) ## Authenticate and use regional endpoints Regional endpoints support the same authentication methods as the global endpoint: Microsoft Entra ID (formerly Azure Active Directory), service principals, managed identities, and admin credentials. ### Sign in to a regional endpoint **Sign in to the global endpoint (default):** ```azurecli az acr login --name myregistry ``` **Sign in to a specific regional endpoint:** ```azurecli az acr login --name myregistry --endpoint eastus ``` ### Tag and push an image to a regional endpoint Tag an existing image with the regional endpoint URL, then push it: ```bash docker tag myapp:v1 myregistry.eastus.geo.azurecr.io/myapp:v1 docker push myregistry.eastus.geo.azurecr.io/myapp:v1 ``` ### Pull an image from a regional endpoint ```bash docker pull myregistry.eastus.geo.azurecr.io/myapp:v1 ``` ## Use regional endpoints with Kubernetes You can specify regional endpoints directly in Kubernetes deployment manifests. This ensures clusters in specific regions always pull from their local replica. 
```yaml apiVersion: apps/v1 kind: Deployment metadata: name: myapp spec: template: spec: containers: - name: myapp image: myregistry.eastus.geo.azurecr.io/myapp:v1 ``` For information about authenticating Azure Kubernetes Service (AKS) with ACR, see [Authenticate with Azure Container Registry from Azure Kubernetes Service](https://learn.microsoft.com/azure/container-registry/container-registry-auth-aks). ## Import from specific geo-replicas When importing images between registries, you can use regional endpoints to import from a specific geo-replica of the source registry. This is useful for scenarios where you want predictable network paths or need to import from a replica in a specific region. **Import from the global endpoint (Azure chooses the replica):** ```azurecli az acr import \ --name mydownstreamregistry \ --source myupstreamregistry.azurecr.io/myapp:v1 \ --image myapp:v1 ``` **Import from a specific geo-replica using its regional endpoint:** ```azurecli az acr import \ --name mydownstreamregistry \ --source myupstreamregistry.westeurope.geo.azurecr.io/myapp:v1 \ --image myapp:v1 ``` This allows downstream registries to explicitly import from a specific geo-replica of an upstream registry, providing control over which regional replica serves the import operation. 
## Network considerations ### Firewall rules When using regional endpoints, configure your firewall rules to allow access to: | Endpoint | Purpose | |----------|---------| | `myregistry..geo.azurecr.io` | Regional endpoint for registry operations | | `myregistry.azurecr.io` | Global endpoint (if also used) | | `myregistry..data.azurecr.io` | Layer downloads (if using private endpoints or dedicated data endpoints) | | `*.blob.core.windows.net` | Layer downloads (if not using private endpoints or dedicated data endpoints) | ### Private endpoints For registries with private endpoints enabled, enabling regional endpoints creates an additional private IP address for each geo-replica in all associated virtual networks. **Example**: If your registry has 3 geo-replicas and you enable regional endpoints, each virtual network with a private endpoint to your registry consumes 3 additional private IP addresses (one per regional endpoint). For more information, see [Connect privately to an Azure container registry using Azure Private Link](https://learn.microsoft.com/azure/container-registry/container-registry-private-link). ### Dedicated data endpoints Regional endpoints work with [dedicated data endpoints](https://learn.microsoft.com/azure/container-registry/container-registry-dedicated-data-endpoints). When both features are enabled, layer downloads from regional endpoints automatically redirect to the geo-replica's dedicated data endpoint. 
> [!TIP] > It is recommended to also enable dedicated data endpoints for optimal in-region performance when using regional endpoints: > > ```azurecli > az acr update -n --data-endpoint-enabled true > ``` ## Endpoint types reference | Endpoint type | URL format | Purpose | |---------------|------------|---------| | Global endpoint | `myregistry.azurecr.io` | Login server with Azure-managed routing to any geo-replica | | Regional endpoint | `myregistry..geo.azurecr.io` | Login server for a specific geo-replica | | Data endpoint | `myregistry..data.azurecr.io` | Layer downloads for private endpoint or dedicated data endpoint-enabled registries | ## Related content - [Geo-replication in Azure Container Registry](https://learn.microsoft.com/azure/container-registry/container-registry-geo-replication) - [Dedicated data endpoints for Azure Container Registry](https://learn.microsoft.com/azure/container-registry/container-registry-dedicated-data-endpoints) - [Connect privately using Azure Private Link](https://learn.microsoft.com/azure/container-registry/container-registry-private-link) - [Configure firewall access rules](https://learn.microsoft.com/azure/container-registry/container-registry-firewall-access-rules) ================================================ FILE: docs/roles-and-permissions.md ================================================ # ACR Roles & Permissions ACR supports a set of permissions, assigned to specific Azure Roles. Using Azure IAM, specific permissions can be assigned to users and/or service principals. 
The below table represents the Azure Roles and the ACR Permissions applied | Role/Permission | [ARM Access](#arm-access)| [Create/Delete ACR](#create/delete-acr) | [Push](#push) | [Pull](#pull) | [Policy Changes](#policy-changes) | [Change Quarantine State](#change-quarantine-state) | [Pull Quarantine Images](#pull-quarantine-images) | [Signature Signing](#signature-signing) | | ---------| --------- | --------- | --------- | --------- | --------- | --------- | --------- | --------- | | Owner | X | X | X | X | X | | | | | Contributor | X | X | X | X | X | | | | | Reader | X | | | X | | | | | | AcrPush | | | X | X | | | | | | AcrPull | | | | X | | | | | | AcrQuarantineWriter | | | | | | X | X | | | AcrQuarantineReader | | | | | | | X | | | AcrImageSigner | | | | | | | | X | ## Differentiating Users and Services Anytime permissions are applied, best practices suggest providing the most limited set of permissions for a person, or service, to accomplish their task. The following permission sets represent a set of capabilities that may be used by humans and headless services. ### CI/CD Solutions When automating `docker build`s from CI/CD solutions, you'll need `docker push` capabilities. For these headless service scenarios, we'd suggest assigning the **AcrPush** role. This limits the account from access through the portal. While we don't worry about code going rogue and doing additional destructive tasks, depending on how you limit the access keys, users may get the username/password credentials required to do damage. ### Container Host Nodes Likewise, nodes running your containers will need the **AcrPull** role, but shouldn't require **reader** capabilities. ### Tools like the VS Code ACR extension For tools like the VS Code ACR extension, additional resource provider access will be required to list the set of registries available. In this case, you would provide your users access to the **reader** and/or **contributor** role. 
These roles will allow `docker pull`, `docker push` and `az acr list`, `az acr build` and other capabilities. ## ARM Access ARM represents the Azure Resource Manager. ARM access is required for the Azure Portal and [az cli](https://docs.microsoft.com/en-us/cli/azure/). To get a list of registries, such as `az acr list`, you will need this permission set. ## Create/Delete ACR The ability to create and delete registries ## Push The ability to `docker push` an image to the registry ## Pull The ability to `docker pull` an image, that has not been quarantined, from the registry. ## Policy Changes The ability to configure policies on the registry, such as image purging, enabling quarantine and image signing. ## Change Quarantine State The ability to set the quarantine state of an image. This role should only be assigned to vulnerability scanners using service principals. Individual users, even operations people should use the vulnerability scanning solution to override the quarantine state. ## Pull Quarantine Images The ability to `docker pull` images by their digest, allowing a vulnerability scan. !Note: This role should only be assigned to vulnerability scanners using service principals. Individual users, even operations people should use the vulnerability scanning solution to override the quarantine state. ## Signature Signing The ability to sign images, usually assigned to an automated process, which would use service principals. ================================================ FILE: docs/tasks/agentpool/README.md ================================================ --- title: Agent Pools --- # Running ACR Tasks on Dedicated Agent Pools ## Introduction ACR Task Agent Pool provides [ACR Task][acr-tasks] execution in dedicated machine pools. Task Agent Pools provide: - **VNet Support:** Agent Pools may be assigned to Azure VNets, providing access the resources in the VNet (eg, Container Registry, Key Vault, Storage). 
- **Scale As Needed:** Agent pools can be increased as needed, or scaled to zero, billed based on allocation. - **More Memory and CPU Options:** The current preview provides 3 standard tiers, S1 (2 cpu, 3G mem), S2 (4 cpu, 8G mem), and S3 (8 cpu, 16G mem) and 1 isolated tier, I6 (64 cpu, 216G mem). - **Agent Pools per Workload:** To serve different configurations, instance agent pools based on scale and tier options to serve different types of workloads. - **Hybrid Managed Pools:** Task pools are patched and maintained by Azure. Task pools provide a balance between reserved allocation without the need to maintain the individual VMs. ACR Task Agent Pools are currently previewed in WestUS2, SouthCentralUS, EastUS2, EastUS, CentralUS, USGovArizona, USGovTexas and USGovVirginia. ## Prerequisites - [Azure CLI][azure-cli] __2.3.1__ or above. - A [__premium__ container registry][acr-tiers] in the above preview regions. ## Create and Manage an ACR Task Agent Pool - Set the default registry, simplifying CLI commands ```sh az configure --defaults acr=[registryName] ``` - Create an agent pool of tier S2 (4 cpu/instance) with 1 instance. ```sh az acr agentpool create \ --name myagentpool \ --tier S2 ``` - Scale the agent pool with more instances or scale in to 0. ```sh az acr agentpool update \ --name myagentpool \ --count 2 ``` ## Create an Agent Pool in a VNet - Task Agent Pools require access to the following Azure services. The following firewall rules must be added to any existing network security groups or user-defined routes. 
| Direction | Protocol | Source | Source Port | Destination | Dest Port | Used | |-----------|----------|----------------|-------------|----------------------|-----------|---------| | Outbound | TCP | VirtualNetwork | Any | AzureKeyVault | 443 | Default | | Outbound | TCP | VirtualNetwork | Any | Storage | 443 | Default | | Outbound | TCP | VirtualNetwork | Any | EventHub | 443 | Default | | Outbound | TCP | VirtualNetwork | Any | AzureActiveDirectory | 443 | Default | | Outbound | TCP | VirtualNetwork | Any | AzureMonitor | 443 | Default | [NOTE] If your Tasks require additional resources from the public internet, e.g., if you run a docker build task that needs to pull base images from Docker Hub or restore NuGet packages, please add the corresponding rules. - Create an agent pool in the VNet. ```sh subnet=$(az network vnet subnet show \ -g myvnetresourcegroup \ --vnet-name myvnetname \ -n mysubnetname \ --query id -o tsv) az acr agentpool create \ --name myagentpool \ --tier S2 \ --subnet-id $subnet ``` ## Schedule Runs on the Agent Pool - Schedule a quick run on the agent pool. ```sh az acr build \ --agent-pool myagentpool \ -t myimage:mytag \ -f Dockerfile \ https://github.com/Azure-Samples/acr-build-helloworld-node.git ``` - Create a recurring task on the agent pool. ```sh az acr task create \ -n mytask \ --agent-pool myagentpool \ -t myimage:mytag \ -f Dockerfile \ -c https://github.com/Azure-Samples/acr-build-helloworld-node.git \ --commit-trigger-enabled false az acr task run \ -r mypremiumregistry \ -n mytask ``` - Query the agent pool queue status (current scheduled runs on the agent pool). ```sh az acr agentpool show \ -n myagentpool \ --queue-count ``` ## Preview Limitations - Task Agent Pools currently support Linux nodes. Windows nodes are not currently supported. - For each registry, the default total cpu quota of all standard agent pools is 16 and all isolated agent pools is 0. Please [open a support ticket][open-support-ticket] for additional allocation. 
[acr-tasks]: https://aka.ms/acr/tasks [acr-tiers]: https://aka.ms/acr/tiers [azure-cli]: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest [open-support-ticket]: https://aka.ms/acr/support/create-ticket ================================================ FILE: docs/tasks/buildx/README.md ================================================ # Build Enhancements in ACR Tasks Building Linux images using [buildx]() and [buildkit]() is in preview for [ACR Tasks](). You can open an issue by clicking [here](https://github.com/Azure/acr/issues/new/choose) if you have any issues trying it out. With `buildx`, build performance is enhanced with various advanced features, such as concurrent building and cache import/export support. The overall performance comparison is presented as below while the underlying ACR tasks are run in the south-east Asia region. | Image | build | buildx | buildx and initialize cache | buildx with cache | | ------------------------------------------------------------ | ------ | ------ | --------------------------- | ----------------- | | [oras]() | 2m48s | 2m15s | 3m0s | 25s | | [moby]() | 15m34s | 9m50s | 12m40s | 1m45s | | [docker-stacks/all-spark-notebook]() | 12m52s | 8m47s | 10m0s | 2m50s | | [azure-cli]() | 7m33s | 5m59s | 6m1s | 1m15s | | [nodejs-docker-example]() | 1m59s | 1m18s | 1m14s | 52s | As shown above, `docker buildx` is generally faster than `docker build` since `buildx` builds images concurrently with multi-stage Dockerfiles. To [build with cache](#build-with-cache), the first run of `buildx` is expected to be slower since there is no cache existing and it requires extra time to export caches. The subsequent run is expected to be faster, utilizing the existing caches. 
## Set default registry name To make it easy to copy/paste commands, and avoid having to place the registry name in each command, use the following command to set a default registry: ```sh az configure --defaults acr=myregistry ``` ## Run `buildx` in ACR Tasks Since `buildx` has not been integrated with ACR Tasks, it is required to build `buildx` from its source before actually using it. The `buildx` image can be built by ACR Tasks using the multi-step task YAML file [bootstrap.yaml](bootstrap.yaml) as follows. ```sh az acr run -f bootstrap.yaml /dev/null ``` The resulting `buildx` image will be pushed to `myregistry.azurecr.io/buildx`. Visually, running the `buildx` image is equivalent to running the `docker buildx` command. ### Build using `buildx` Images can be built using `buildx`. An example multi-step task YAML file [build.yaml](build.yaml) is provided and can be run as follows. ```sh az acr run -f build.yaml \ --set BUILD_CONTEXT=https://github.com/myuser/myrepo.git \ --set REPOSITORY_NAME=myrepo \ /dev/null ``` The resulting image will be pushed to `myregistry.azurecr.io/myrepo`. For instance, run the following task to build `oras` and push to `myregistry.azurecr.io/oras` using `buildx`. ```sh az acr run -f build.yaml \ --set BUILD_CONTEXT=https://github.com/deislabs/oras.git \ --set REPOSITORY_NAME=oras \ /dev/null ``` It is also possible to build a local repository using `buildx`. Run the following task to build using `buildx` with the context path `local-repository-folder-path`. ```sh az acr run -f build.yaml \ --set BUILD_CONTEXT=. \ --set REPOSITORY_NAME=myrepo \ local-repository-folder-path ``` ### Build with Cache The build process can be sped up using caches. An example multi-step task YAML file [build_with_cache.yaml](build_with_cache.yaml) is provided and configured to export max cache. It can be run as follows. 
```sh az acr run -f build_with_cache.yaml \ --set BUILD_CONTEXT=https://github.com/myuser/myrepo.git \ --set REPOSITORY_NAME=myrepo \ /dev/null ``` The resulted image will be pushed to `myregistry.azurecr.io/myrepo`, and the cache is imported from / exported to `myregistry.azurecr.io/myrepo:cache`. The first run of the building process is expected to be slower than a normal `buildx` build since it has no cache imported and it requires extra time to export the resulted cache. The subsequent runs are expected to be faster as the valid cache is imported. ================================================ FILE: docs/tasks/buildx/bootstrap.yaml ================================================ version: v1.0.0 steps: # Build buildx from source using the built-in buildkit - cmd: docker build -t binaries https://github.com/docker/buildx.git env: [ "DOCKER_BUILDKIT=1" ] # Create a new Dockerfile to set up the entry point for buildx - cmd: | bash -c 'echo "FROM binaries ENTRYPOINT [ \"/buildx\" ]" > Dockerfile' # Build the above Dockerfile - build: -t {{.Run.Registry}}/buildx . 
# Push the resulted buildx image to the remote registry - push: [ {{.Run.Registry}}/buildx ] ================================================ FILE: docs/tasks/buildx/build.yaml ================================================ version: v1.0.0 steps: - cmd: >- {{.Run.Registry}}/buildx build --push -t {{.Run.Registry}}/{{.Values.REPOSITORY_NAME}}:{{.Run.ID}} {{.Values.BUILD_CONTEXT}} ================================================ FILE: docs/tasks/buildx/build_with_cache.yaml ================================================ version: v1.0.0 steps: - cmd: >- {{.Run.Registry}}/buildx build --push -t {{.Run.Registry}}/{{.Values.REPOSITORY_NAME}}:{{.Run.ID}} --cache-from={{.Run.Registry}}/{{.Values.REPOSITORY_NAME}}:cache --cache-to=type=registry,ref={{.Run.Registry}}/{{.Values.REPOSITORY_NAME}}:cache,mode=max {{.Values.BUILD_CONTEXT}} ================================================ FILE: docs/tasks/buildx/build_with_cache_2.yaml ================================================ version: v1.1.0 steps: - build: -t $Registry/{{.Values.REPOSITORY_NAME}}:$ID {{.Values.BUILD_CONTEXT}} cache: enabled ================================================ FILE: docs/tasks/container-registry-tasks-overview.md ================================================ --- title: Automate OS and Framework Patching with Azure Container Registry Tasks description: An introduction to ACR Tasks, a suite of features in Azure Container Registry that provides secure, automated container image build, test and patching in the cloud. services: container-registry author: stevelas manager: balans ms.service: container-registry ms.topic: article ms.date: 08/30/2018 ms.author: stevelas --- # Automate OS & Framework Patching with ACR Tasks ACR Tasks provide a container centric compute primitive, focused on building and patching container workloads. ACR Tasks are a series of steps representing execution of one or more containers, using the container as the execution environment. 
ACR Tasks are defined with a `.yaml` file, identifying the steps and the dependencies each step has upon another. Through ACR Tasks, developers can: - **[build](container-registry-task-ref-build.md)** containers using familiar syntax of `docker build` - **[push](container-registry-task-ref-push.md)** newly built images to a registry, including ACR, Docker hub and other private registries. - **[cmd](container-registry-task-ref-cmd.md)** to run a container as a function, enabling parameters passed to the container [ENTRYPOINT]. `cmd` supports run parameters including volumes and other familiar `docker run` parameters, enabling unit and functional testing with concurrent container execution. ## ACR Build and ACR Tasks [ACR Build](https://aka.ms/acr/build), (generally available September 2018), is the pre-cursor of ACR Tasks, focusing on a single step that builds and optionally pushes the built image. ACR Tasks adds the ability to break up the building of an image into more composable steps. With ACR Task steps, users have more granular control over building their images, while adding testing capabilities, all within the ACR Task compute environment. ## ACR Task Common Scenarios The most common scenarios include: - Building, tagging and pushing 1 or more container images; in series or in parallel. - Running and capturing unit test and code coverage results. - Running and capturing functional tests. ACR Tasks supports running multiple containers, executing a series of requests between them. - Task based execution, including pre/post steps of a container build. - Deploying 1 or more containers with your favorite deployment engine to your target environment. ACR Tasks can be as simple as building a single image: ```yaml version: 1.0.0 steps: - build: -t {{.Run.Registry}}/hello-world:{{.Run.ID}} . 
- push: ["{{.Run.Registry}}/hello-world:{{.Run.ID}}"] ``` To more complex build, test, helm package, helm deploy scenarios: ```yaml version: 1.0.0 steps: - id: build-web build: -t {{.Run.Registry}}/hello-world:{{.Run.ID}} . when: ["-"] - id: build-tests build: -t {{.Run.Registry}}/hello-world-tests ./funcTests when: ["-"] - id: push push: ["{{.Run.Registry}}/helloworld:{{.Run.ID}}"] when: ["build-web", "build-tests"] - id: hello-world-web cmd: {{.Run.Registry}}/helloworld:{{.Run.ID}} - id: funcTests cmd: {{.Run.Registry}}/helloworld:{{.Run.ID}} env: host=helloworld:80 when: ["hello-world-web"] - cmd: {{.Run.Registry}}/functions/helm package --app-version {{.Run.ID}} -d ./helm ./helm/helloworld/ when: ["funcTests"] - cmd: {{.Run.Registry}}/functions/helm upgrade helloworld ./helm/helloworld/ --reuse-values --set helloworld.image={{.Run.Registry}}/helloworld:{{.Run.ID}} ``` ## ACR Tasks Support the 3 Primary Phases of Development ACR Tasks highlight 3 phases of container life cycle management. - **Inner Loop Development** - Before developers git-commit their code, they can test their container builds and tasks with `az acr task run .` - **Team based commits** - Whether a team of 1, or 100, as git commits are made, tasks can be triggered for execution. See [az acr task create](container-registry-task-create.md) for establishing trigger based execution. - **Post development, OS & Framework Patching** - When developing and deploying containers, the means to patch a container involves rebuilding the image, testing and deploying the newly built and tested images. ACR Tasks support [base image update triggers](container-registry-task-create.md#BaseImageTriggers), enabling a task to run as the runtime or buildtime dependent images are updated. ## ACR Tasks Support Simple to Complex Workloads, Integrating with CI/CD Solutions Many developers may find ACR Tasks meet their needs. 
As the complexity increases, or users wish to integrate into their existing CI/CD solutions, ACR Tasks can be integrated with CI/CD pipelines, getting the benefits of fast, cloud native container execution, with the robust capabilities of other CI/CD solutions. ### Scoping and Positioning ACR Tasks With Other Azure Container Primitives As containers continue to become the [common unit of custom and ISV code deployment](https://blogs.msdn.microsoft.com/stevelasker/2016/05/26/docker-containers-as-the-new-binaries-of-deployment/), Azure container hosting continues to expand. ACR Tasks are intended to fill a gap between ACI, AKS, Batch, App Services and other Azure Services. ACR Tasks are focused on short lived execution, with multi-tenant isolation capabilities. Customers building and testing their containers should have performance similar to local builds. This includes task execution queuing, scheduling, streaming of logs. > Note: performance will continue to increase as other features come online. However, we don't know what we don't know, and we seek your feedback: ## ACR Tasks Preview Feedback ACR Tasks evolved from the container life cycle management efforts, focusing on [OS & Framework patching of containers](https://blogs.msdn.microsoft.com/stevelasker/2017/12/20/os-framework-patching-with-docker-containers-paradigm-shift/). For containers to evolve past the complexity of patching and testing virtual machines, ACR Build required the ability to run test containers. As we explored various options, we focused on the simplicity of running a container, passing in arguments and letting the developer choose what and how they wish to run their tests. The ability to run containers, for short lived bursts, at cloud scale is core to ACR Tasks. This primitive has exposed other possibilities and we seek your feedback. 
- [Roadmap](https://aka.ms/acr/roadmap) - for visibility into our planned work - [UserVoice](https://aka.ms/acr/uservoice) - to vote for existing requests, or create a new request - [Feedback](https://aka.ms/acr/feedback) - to provide feedback, engage in discussion with the community - [Issues](https://aka.ms/acr/issues) - to view existing bugs and issues, logging new ones ## Next steps To learn more about ACR Tasks, drill into the following topics: > [!div class="nextstepaction"] * [ACR Task Walkthrough](./container-registry-tasks-walkthrough.md) * [ACR task.yaml Reference](./container-registry-ref-acr-tasks-yaml.md) ================================================ FILE: docs/tasks/container-registry-tasks-walkthrough.md ================================================ --- title: ACR Task Walkthrough description: Walkthrough, using ACR Tasks services: container-registry author: stevelas manager: balans ms.service: container-registry ms.topic: article ms.date: 08/31/2018 ms.author: stevelas --- # ACR Task Walkthrough ACR Tasks provide a container centric compute primitive, focused on building and patching containers. This doc covers a walkthrough to understand the capabilities of ACR Tasks. ## ACR Task Execution Model ACR Tasks take advantage of the container execution and isolation model, enabling customers to run any series of containers as commands across a common directory. ACR Tasks provide a common context and conditional/dependency flow between steps providing primitive, yet robust scenarios. By deferring the execution to containers, ACR Tasks has minimal dependencies between the Task execution environment and the code within a container. Using containers as a collection of commands; developers may use any language or framework they desire, running on Linux or Windows operating systems, minimizing version dependency. 
# Task Step Types ACR Tasks supports three step types: - **[build](#build)** containers using familiar syntax of `docker build` - **[push](#push)** supports `docker push` of newly built or re-tagged images to a registry, including ACR, Docker hub and other private registries. - **[cmd](#cmd)** to run a container as a command, enabling parameters passed to the containers `[ENTRYPOINT]`. `cmd` supports run parameters including volumes and other familiar `docker run` parameters, enabling unit and functional testing with concurrent container execution. # Running Samples Samples referenced use `az acr run` and assume a default registry is configured. - Configure a default registry Assuming your registry is named yourRegistry.azurecr.io, run the following ```sh az configure --defaults acr=yourRegistry ``` > **Note:** As of 9/9/18, `az acr run` is not yet public. Replace `az acr run` with `az acr build`, using the `-f` parameter to reference the `task.yaml` file. ## Building A Single Image Using [ACR Build](https://aka.ms/acr/build), users can easily build and optionally push single images. ```sh az acr build -t hello-world:{{.Build.ID}} https://github.com/Azure-Samples/acr-build-helloworld-node.git ``` The equivalent ACR task would involve: ```yaml version: 1.0-preview-1 steps: - build: -t {{.Run.Registry}}/hello-world:{{.Run.ID}} . - push: ["{{.Run.Registry}}/hello-world:{{.Run.ID}}"] ``` The task.yaml version does the following: - breaks up build and push into separate steps - changes `Build.ID` to [Run.ID](./container-registry-ref-acr-tasks-yaml.md#runid) to better represent a run, which may do many things, in addition to `docker build` - provides a fully qualified reference to the target registry using [Run.Registry](./container-registry-ref-acr-tasks-yaml.md#run.registry). ACR Tasks supports pushing images to other registries. To test the above yaml, run the following command in [cloud shell](https://shell.azure.com) or any other bash environment. 
```sh az acr run -f build-push-hello-world.yaml https://github.com/azure-samples/acr-tasks.git ``` ### WORK IN PROGRESS --- > [!div class="nextstepaction"] * [ACR Task Overview](./container-registry-tasks-overview.md) * [ACR task.yaml Reference](./container-registry-ref-acr-tasks-yaml.md) ================================================ FILE: docs/tasks/run-as-deployment/README.md ================================================ --- title: Deploy with ARM templates --- # Running ACR Tasks as a deployment The following set of samples show how to use an ARM template to execute various Task workflows. 1. [Deploy a docker build on an existing registry](https://github.com/Azure/acr/tree/main/docs/tasks/run-as-deployment/quickdockerbuild-on-existing-registry) 1. [Deploy a docker build on an existing registry using identity and keyvault](https://github.com/Azure/acr/tree/main/docs/tasks/run-as-deployment/quickdockerbuildusingidentitykeyvault) 1. [Create a Registry and perform a docker build from a GitHub repository](https://github.com/Azure/acr/tree/main/docs/tasks/run-as-deployment/quickdockerbuild) 1. [Create a Registry and perform a docker build from a GitHub repository with a Managed Identity](https://github.com/Azure/acr/tree/main/docs/tasks/run-as-deployment/quickdockerbuild) 1. [Create a registry and schedule a quick task with task definition as an argument](https://github.com/Azure/acr/tree/main/docs/tasks/run-as-deployment/quickrun) 1. 
[Create a registry and schedule a predefined task](https://github.com/Azure/acr/tree/main/docs/tasks/run-as-deployment/taskrun) ================================================ FILE: docs/tasks/run-as-deployment/quickdockerbuild/README.md ================================================ # Quick docker build ## Create a resource group ```bash az group create \ -n mytaskrunrg \ -l westus ``` ## Deploy a registry and a task run which builds/pushes to the registry ```bash registry=$(az deployment group create \ -g mytaskrunrg \ --template-file azuredeploy.json \ --parameters azuredeploy.parameters.json \ --query 'properties.outputs.registry.value' \ -o tsv) ``` ## List the image tag ```bash az acr repository list -n $registry -o tsv \ | xargs -I% az acr repository show-tags -n $registry --repository % --detail -o table ``` ## Create a user assigned identity ```bash identity=$(az identity create \ -g mytaskrunrg \ -n myquickdockerbuildrunwithidentity \ --query 'id' \ -o tsv) ``` ## Deploy a task run which is associated with the user assigned identity and builds/pushes an image to the registry ```bash registry=$(az deployment group create \ -g mytaskrunrg \ --template-file azuredeploy.json \ --parameters azuredeploy.parameters.json \ --parameters userAssignedIdentity=$identity \ --parameters taskRunName=mytaskrunwithidentity \ --query 'properties.outputs.registry.value' \ -o tsv) ``` ## List the image tag ```bash az acr repository list -n $registry -o tsv \ | xargs -I% az acr repository show-tags -n $registry --repository % --detail -o table ``` ================================================ FILE: docs/tasks/run-as-deployment/quickdockerbuild/azuredeploy.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "location": { "type": "string", "defaultValue": "[resourceGroup().location]", "metadata": { "description": "Location for all resources." 
} }, "registryName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Azure Container Registry" } }, "registrySku": { "type": "string", "metadata": { "description": "Tier of your Azure Container Registry" }, "defaultValue": "Basic", "allowedValues": [ "Basic", "Standard", "Premium" ] }, "registryAdminUserEnabled": { "type": "bool", "defaultValue": false, "metadata": { "description": "Enable admin user that have push / pull permission to the registry" } }, "taskRunName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Task Run and tag generated" } }, "userAssignedIdentity": { "type": "string", "metadata": { "description": "The user assigned identity to be bound to the task run" }, "defaultValue": "" }, "sourceLocation": { "type": "string", "metadata": { "description": "The location of the source to build the image" } }, "dockerFilePath": { "type": "string", "metadata": { "description": "The relative path of the dockerfile in the source location" }, "defaultValue": "Dockerfile" }, "repository": { "type": "string", "metadata": { "description": "Repository name for the the build output" } } }, "variables": { "tag": "parameters('taskRunName')", "imageName": "[concat(parameters('repository'), ':', parameters('taskRunName'))]", "identity": { "type": "UserAssigned", "userAssignedIdentities": { "[parameters('userAssignedIdentity')]": { } } } }, "resources": [ { "type": "Microsoft.ContainerRegistry/registries", "name": "[parameters('registryName')]", "apiVersion": "2017-10-01", "location": "[parameters('location')]", "comments": "Container registry for storing docker images", "tags": { "displayName": "Container Registry", "container.registry": "[parameters('registryName')]" }, "sku": { "name": "[parameters('registrySku')]", "tier": "[parameters('registrySku')]" }, "properties": { "adminUserEnabled": "[parameters('registryAdminUserEnabled')]" }, "resources": [ { "type": "taskRuns", 
"name": "[parameters('taskRunName')]", "location": "[parameters('location')]", "apiVersion": "2019-06-01-preview", "dependsOn": [ "[parameters('registryName')]" ], "identity": "[if(not(empty(parameters('userAssignedIdentity'))), variables('identity'), '')]", "properties": { "runRequest": { "type": "DockerBuildRequest", "dockerFilePath": "[parameters('dockerFilePath')]", "imageNames": [ "[variables('imageName')]" ], "sourceLocation": "[parameters('sourceLocation')]", "isPushEnabled": true, "platform": { "os": "linux", "architecture": "amd64" } } } } ] } ], "outputs": { "registry": { "type": "string", "value": "[parameters('registryName')]" }, "repository": { "type": "string", "value": "[parameters('repository')]" }, "tag": { "type": "string", "value": "[variables('tag')]" } } } ================================================ FILE: docs/tasks/run-as-deployment/quickdockerbuild/azuredeploy.parameters.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", "contentVersion": "1.0.0.0", "parameters": { "registryName": { "value": "mytaskrunregistry" }, "taskRunName": { "value": "myquickdockerbuildrun" }, "repository": { "value": "helloworld-node" }, "sourceLocation": { "value": "https://github.com/Azure-Samples/acr-build-helloworld-node.git" } } } ================================================ FILE: docs/tasks/run-as-deployment/quickdockerbuild-on-existing-registry/README.md ================================================ # Quick docker build on an existing registry The sample shows how to schedule a deployment which will perform a quick docker build on an existing registry from a source located in a GitHub repository. The tag of the image is derived from the `taskRunName` provided during deployment. 
## Deploy the Task ```bash registry=$(az deployment group create \ -g mytaskrunrg \ --template-file azuredeploy.json \ --parameters azuredeploy.parameters.json \ --parameters taskRunName=mytaskrunwithidentity \ --query 'properties.outputs.registry.value' \ -o tsv) ``` ## ```bash export registry=mytaskrunregistry export repository=helloworld-node az acr repository show-tags -n $registry --repository $repository --detail -o table ``` ================================================ FILE: docs/tasks/run-as-deployment/quickdockerbuild-on-existing-registry/azuredeploy.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "location": { "type": "string", "defaultValue": "[resourceGroup().location]", "metadata": { "description": "Location for all resources." } }, "registryName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Azure Container Registry" } }, "taskRunName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Task Run and tag generated" } }, "userAssignedIdentity": { "type": "string", "metadata": { "description": "The user assigned identity to be bound to the task run" }, "defaultValue": "" }, "sourceLocation": { "type": "string", "metadata": { "description": "The location of the source to build the image" } }, "dockerFilePath": { "type": "string", "metadata": { "description": "The relative path of the dockerfile in the source location" }, "defaultValue": "Dockerfile" }, "repository": { "type": "string", "metadata": { "description": "Repository name for the the build output" } } }, "variables": { "tag": "parameters('taskRunName')", "imageName": "[concat(parameters('repository'), ':', parameters('taskRunName'))]", "identity": { "type": "UserAssigned", "userAssignedIdentities": { "[parameters('userAssignedIdentity')]": { } } } }, 
"resources": [ { "type": "Microsoft.ContainerRegistry/registries/taskRuns", "apiVersion": "2019-06-01-preview", "name": "[concat(parameters('registryName'), '/', parameters('taskRunName'))]", "location": "[parameters('location')]", "identity": "[if(not(empty(parameters('userAssignedIdentity'))), variables('identity'), '')]", "properties": { "runRequest": { "type": "DockerBuildRequest", "dockerFilePath": "[parameters('dockerFilePath')]", "imageNames": [ "[variables('imageName')]" ], "sourceLocation": "[parameters('sourceLocation')]", "isPushEnabled": true, "platform": { "os": "linux", "architecture": "amd64" } } } } ], "outputs": { "registry": { "type": "string", "value": "[parameters('registryName')]" }, "repository": { "type": "string", "value": "[parameters('repository')]" }, "tag": { "type": "string", "value": "[variables('tag')]" } } } ================================================ FILE: docs/tasks/run-as-deployment/quickdockerbuild-on-existing-registry/azuredeploy.parameters.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", "contentVersion": "1.0.0.0", "parameters": { "registryName": { "value": "mytaskrunregistry" }, "taskRunName": { "value": "myquickdockerbuildrun" }, "repository": { "value": "helloworld-node" }, "sourceLocation": { "value": "https://github.com/Azure-Samples/acr-build-helloworld-node.git" } } } ================================================ FILE: docs/tasks/run-as-deployment/quickdockerbuildusingidentitykeyvault/README.md ================================================ # Quick Docker build using identity and keyvault ## Create a resource group ```bash az group create \ -n mytaskrunrg \ -l westus ``` ## Create a Registry ```bash az acr create \ -n myreg -g mytaskrunrg --sku Standard ``` ## Create a Custom Registry and enable the admin user ```bash az acr create \ -n mycustomreg -g mytaskrunrg --sku Standard --admin-enabled true ``` ## 
Create a User Identity ```bash az identity create \ -g mytaskrunrg \ -n myquickdockerbuildrunwithidentity ``` ## Create KeyVault ```bash az keyvault create --name mykeyvault --resource-group mytaskrunrg --location eastus2 ``` ## Save registry username/password in the keyvault ```bash #Get password of admin user password=$(az acr credential show --name mycustomreg --query passwords[0].value --output tsv) az keyvault secret set --name username --value mycustomreg --vault-name mykeyvault az keyvault secret set --name password --value $password --vault-name mykeyvault ``` ## Grant identity access to key vault (object-id is the Object ID of managed identity) ```bash #Get principal id of the identity principalId=$(az identity show --resource-group mytaskrunrg --name myquickdockerbuildrunwithidentity --query principalId --output tsv) az keyvault set-policy --name mykeyvault --resource-group mytaskrunrg --object-id $principalId --secret-permissions get ``` ## Deploy a quick run ```bash #Get the custom registry name customregistryName=$(az acr show -n mycustomreg --query loginServer --output tsv) #Get the KeyVault UserName Url userNameUrl=$(az keyvault secret show --name username --vault-name mykeyvault --query id --output tsv) #Get the KeyVault Password Url passwordUrl=$(az keyvault secret show --name password --vault-name mykeyvault --query id --output tsv) #Get the ID of ManagedIdentity managedIdResourceId=$(az identity show --resource-group mytaskrunrg --name myquickdockerbuildrunwithidentity --query id --output tsv) az deployment group create --resource-group "mytaskrunrg" --template-file azuredeploy.json \ --parameters azuredeploy.parameters.json \ --parameters registryName="myreg" \ --parameters taskRunName="mytaskrun" \ --parameters customRegistryName=$customregistryName \ --parameters userNameUrl=$userNameUrl \ --parameters userPasswordUrl=$passwordUrl \ --parameters repository="hello-world" \ --parameters managedIdResourceId=$managedIdResourceId \ --parameters 
sourceLocation="https://github.com/Azure-Samples/acr-build-helloworld-node.git" ``` ================================================ FILE: docs/tasks/run-as-deployment/quickdockerbuildusingidentitykeyvault/azuredeploy.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "location": { "type": "string", "defaultValue": "[resourceGroup().location]", "metadata": { "description": "Location for all resources." } }, "registryName": { "type": "string", "metadata": { "description": "Name of your Azure Container Registry" } }, "taskRunName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Task Run" } }, "userNameUrl": { "type": "string", "metadata": { "description": "The keyvault Url to the UserName" } }, "userPasswordUrl": { "type": "string", "metadata": { "description": "The keyvault Url to the Password" } }, "customRegistryName": { "type": "string", "metadata": { "description": "The full name of the Custom Registry" } }, "repository": { "type": "string", "metadata": { "description": "Repository name for the the build output" } }, "sourceLocation": { "type": "string", "metadata": { "description": "The location of the source to build the image" } }, "dockerFilePath": { "type": "string", "metadata": { "description": "The relative path of the dockerfile in the source location" }, "defaultValue": "Dockerfile" }, "managedIdResourceId": { "type": "string", "metadata": { "description": "The Full Path Of ManagedIdentity" } } }, "variables": { "imageName": "[concat(parameters('repository'), ':', parameters('taskRunName'))]", "idApiVersion": "[first(providers('Microsoft.ManagedIdentity', 'userAssignedIdentities').apiVersions)]" }, "resources": [ { "type": "Microsoft.ContainerRegistry/registries/taskRuns/", "name": "[concat(parameters('registryName'), '/', parameters('taskRunName'))]", "location": 
"[parameters('location')]", "apiVersion": "2019-06-01-preview", "identity": { "principalId": null, "tenantId": null, "type": "UserAssigned", "userAssignedIdentities": { "[parameters('managedIdResourceId')]": {} } }, "properties": { "runRequest": { "type": "DockerBuildRequest", "imageNames": [ "[variables('imageName')]" ], "sourceLocation": "[parameters('sourceLocation')]", "dockerFilePath": "[parameters('dockerFilePath')]", "values": [], "isPushEnabled": true, "platform": { "os": "linux", "architecture": "amd64" }, "credentials": { "apiVersion": "2018-09-01", "customRegistries": { "[parameters('customRegistryName')]": { "userName": { "type": "Vaultsecret", "value": "[parameters('userNameUrl')]" }, "password": { "type": "Vaultsecret", "value": "[parameters('userPasswordUrl')]" }, "identity": "[reference(parameters('managedIdResourceId'), variables('idApiVersion'), 'Full').properties.clientId]" } }, "sourceRegistry": { "loginMode": "Default" } } } } } ], "outputs": { "registry": { "type": "string", "value": "[parameters('registryName')]" }, "repository": { "type": "string", "value": "[parameters('repository')]" } } } ================================================ FILE: docs/tasks/run-as-deployment/quickdockerbuildusingidentitykeyvault/azuredeploy.parameters.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", "contentVersion": "1.0.0.0", "parameters": { "registryName": { "value": "mytaskrunregistry" }, "taskRunName": { "value": "myquickdockerbuildrun" } } } ================================================ FILE: docs/tasks/run-as-deployment/quickdockerbuildwithidentity/README.md ================================================ # Quick Docker build using identity and credential ## Create a resource group ```bash az group create \ -n mytaskrunrg \ -l westus ``` ## Create a Registry ```bash az acr create \ -n myreg -g mytaskrunrg --sku Standard ``` ## Create a Custom 
Registry ```bash az acr create \ -n mycustomreg -g mytaskrunrg --sku Standard ``` ## Create a User Identity ```bash az identity create \ -g mytaskrunrg \ -n myquickdockerbuildrunwithidentity ``` ## Grant identity access to custom registry ```bash #Get principal ID of the identity principalId=$(az identity show --resource-group mytaskrunrg --name myquickdockerbuildrunwithidentity --query principalId --output tsv) #Get the custom registry ID customRegistryId=$(az acr show -n mycustomreg --query id --output tsv) #Assign the Acrpull role to identity az role assignment create \ --assignee $principalId \ --scope $customRegistryId \ --role acrpull ``` ## Deploy a quick run ```bash #Get the custom registry name customregistryName=$(az acr show -n mycustomreg --query loginServer --output tsv) #Get resource ID of the identity resourceId==$(az identity show --resource-group mytaskrunrg --name myquickdockerbuildrunwithidentity --query id --output tsv) #Get client ID of the identity clientId=$(az identity show --resource-group mytaskrunrg --name myquickdockerbuildrunwithidentity --query clientId --output tsv) az deployment group create --resource-group "mytaskrunrg" --template-file azuredeploy.json \ --parameters azuredeploy.parameters.json \ --parameters registryName="myreg" \ --parameters repository="hello-world" \ --parameters taskRunName="mytaskrun" \ --parameters userAssignedIdentity=$resourceId \ --parameters customRegistry=$customregistryName \ --parameters customRegistryIdentity=$clientId \ --parameters sourceLocation="https://github.com/Azure-Samples/acr-build-helloworld-node.git" ``` ================================================ FILE: docs/tasks/run-as-deployment/quickdockerbuildwithidentity/azuredeploy.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "location": { "type": "string", "defaultValue": 
"[resourceGroup().location]", "metadata": { "description": "Location for all resources." } }, "registryName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Azure Container Registry" } }, "registrySku": { "type": "string", "metadata": { "description": "Tier of your Azure Container Registry" }, "defaultValue": "Basic", "allowedValues": [ "Basic", "Standard", "Premium" ] }, "registryAdminUserEnabled": { "type": "bool", "defaultValue": false, "metadata": { "description": "Enable admin user that have push / pull permission to the registry" } }, "taskRunName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Task Run" } }, "userAssignedIdentity": { "type": "string", "metadata": { "description": "The user assigned identity to be bound to the task run" }, "defaultValue": "" }, "sourceLocation": { "type": "string", "metadata": { "description": "The location of the source to build the image" } }, "dockerFilePath": { "type": "string", "metadata": { "description": "The relative path of the dockerfile in the source location" }, "defaultValue": "Dockerfile" }, "repository": { "type": "string", "metadata": { "description": "Repository name for the the build output" } }, "customRegistry": { "type": "string", "metadata": { "description": "CustomRegistry name for the task" } }, "customRegistryIdentity": { "type": "string", "metadata": { "description": "The identity name for the task" } } }, "variables": { "tag": "parameters('taskRunName')", "imageName": "[concat(parameters('repository'), ':', parameters('taskRunName'))]", "identity": { "type": "UserAssigned", "userAssignedIdentities": { "[parameters('userAssignedIdentity')]": {} } } }, "resources": [ { "type": "Microsoft.ContainerRegistry/registries/taskRuns/", "name": "[concat(parameters('registryName'), '/', parameters('taskRunName'))]", "location": "[parameters('location')]", "apiVersion": "2019-06-01-preview", "identity": 
"[if(not(empty(parameters('userAssignedIdentity'))), variables('identity'), '')]", "properties": { "runRequest": { "type": "DockerBuildRequest", "imageNames": ["[variables('imageName')]"], "sourceLocation": "[parameters('sourceLocation')]", "dockerFilePath": "[parameters('dockerFilePath')]", "values": [], "isPushEnabled": true, "platform": { "os": "linux", "architecture": "amd64" }, "credentials": { "apiVersion": "2018-09-01", "customRegistries": { "[parameters('customRegistry')]": { "identity": "[parameters('customRegistryIdentity')]" } }, "sourceRegistry": { "loginMode": "Default" } } } } } ], "outputs": { "registry": { "type": "string", "value": "[parameters('registryName')]" }, "repository": { "type": "string", "value": "[parameters('repository')]" }, "tag": { "type": "string", "value": "[variables('tag')]" } } } ================================================ FILE: docs/tasks/run-as-deployment/quickdockerbuildwithidentity/azuredeploy.parameters.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", "contentVersion": "1.0.0.0", "parameters": { "registryName": { "value": "mytaskrunregistry" }, "taskRunName": { "value": "myquickdockerbuildrun" }, "sourceLocation": { "value": "https://github.com/Azure-Samples/acr-build-helloworld-node.git" } } } ================================================ FILE: docs/tasks/run-as-deployment/quickrun/README.md ================================================ # Quick run Deploy a quick run or a set of container using a multi-step task with Managed Identities. 
## Create a resource group ```bash az group create \ -n mytaskrunrg \ -l westus ``` ## Create a user assigned identity ```sh identity=$(az identity create \ -g mytaskrunrg \ -n mytaskrunidentity \ --query 'id' \ -o tsv) ``` ## Deploy a registry and a task run which is associated with the user assigned identity and run a multi-step task ```bash registry=$(az deployment group create \ -g mytaskrunrg \ --template-file azuredeploy.json \ --parameters azuredeploy.parameters.json \ --parameters userAssignedIdentity=$identity \ --query 'properties.outputs.registry.value' \ -o tsv) ``` ## Output the run log ```bash az acr task list-runs -r $registry --query '[0].runId' -o tsv |\ xargs -I% az acr task logs -r $registry --run-id % ``` ================================================ FILE: docs/tasks/run-as-deployment/quickrun/azuredeploy.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "location": { "type": "string", "defaultValue": "[resourceGroup().location]", "metadata": { "description": "Location for all resources." 
} }, "registryName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Azure Container Registry" } }, "registrySku": { "type": "string", "metadata": { "description": "Tier of your Azure Container Registry" }, "defaultValue": "Basic", "allowedValues": [ "Basic", "Standard", "Premium" ] }, "registryAdminUserEnabled": { "type": "bool", "defaultValue": false, "metadata": { "description": "Enable admin user that have push / pull permission to the registry" } }, "taskRunName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Task Run" } }, "userAssignedIdentity": { "type": "string", "metadata": { "description": "The user assigned identity to be bound to the task run" }, "defaultValue": "" }, "taskContent": { "type": "string", "metadata": { "description": "The content of multi-step task template" } } }, "variables": { "encodedTaskContent": "[base64(parameters('taskContent'))]", "identity": { "type": "UserAssigned", "userAssignedIdentities": { "[parameters('userAssignedIdentity')]": {} } } }, "resources": [ { "type": "Microsoft.ContainerRegistry/registries", "name": "[parameters('registryName')]", "apiVersion": "2017-10-01", "location": "[parameters('location')]", "comments": "Container registry for storing docker images", "tags": { "displayName": "Container Registry", "container.registry": "[parameters('registryName')]" }, "sku": { "name": "[parameters('registrySku')]", "tier": "[parameters('registrySku')]" }, "properties": { "adminUserEnabled": "[parameters('registryAdminUserEnabled')]" }, "resources": [ { "type": "taskRuns", "name": "[parameters('taskRunName')]", "location": "[parameters('location')]", "apiVersion": "2019-06-01-preview", "dependsOn": [ "[parameters('registryName')]" ], "identity": "[if(not(empty(parameters('userAssignedIdentity'))), variables('identity'), '')]", "properties": { "runRequest": { "type": "EncodedTaskRunRequest", "encodedTaskContent": 
"[variables('encodedTaskContent')]", "platform": { "os": "linux", "architecture": "amd64" } } } } ] } ], "outputs": { "registry": { "type": "string", "value": "[parameters('registryName')]" } } } ================================================ FILE: docs/tasks/run-as-deployment/quickrun/azuredeploy.parameters.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", "contentVersion": "1.0.0.0", "parameters": { "registryName": { "value": "mytaskrunregistry" }, "taskRunName": { "value": "myquickrun" }, "taskContent": { "value": "version: v1.0.0\nsteps:\n - cmd: microsoft/azure-cli az login --identity\n - cmd: microsoft/azure-cli az account list -o table" } } } ================================================ FILE: docs/tasks/run-as-deployment/taskrun/README.md ================================================ # Task run ## Create a resource group ```bash az group create \ -n mytaskrunrg \ -l westus ``` ## Deploy a task run, which will create the registry, task and schedule a run using the following command ```bash az deployment group create \ --resource-group "mytaskrunrg" --template-file azuredeploy.json --parameters azuredeploy.parameters.json \ --parameters registryName="mytaskrunrg" --parameters --parameters taskName="huanwudfwesttask02" taskRunName="mytaskname" ``` ================================================ FILE: docs/tasks/run-as-deployment/taskrun/azuredeploy.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "location": { "type": "string", "defaultValue": "[resourceGroup().location]", "metadata": { "description": "Location for all resources." 
} }, "registryName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Azure Container Registry" } }, "registrySku": { "type": "string", "metadata": { "description": "Tier of your Azure Container Registry" }, "defaultValue": "Basic", "allowedValues": [ "Basic", "Standard", "Premium" ] }, "registryAdminUserEnabled": { "type": "bool", "defaultValue": false, "metadata": { "description": "Enable admin user that have push / pull permission to the registry" } }, "taskName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Task Run" } }, "taskRunName": { "type": "string", "minLength": 5, "maxLength": 50, "metadata": { "description": "Name of your Task Run" } }, "userAssignedIdentity": { "type": "string", "metadata": { "description": "The user assigned identity to be bound to the task run" }, "defaultValue": "" }, "taskContent": { "type": "string", "metadata": { "description": "The content of multi-step task template" }, "defaultValue": "" }, "sourceLocation": { "type": "string", "metadata": { "description": "The location of the source to build the image" } } }, "variables": { "repository": "hello-world-node", "tag": "parameters('taskRunName')", "imageName": "[concat(variables('repository'), ':', parameters('taskRunName'))]", "identity": { "type": "UserAssigned", "userAssignedIdentities": { "[parameters('userAssignedIdentity')]": {} } } }, "resources": [ { "type": "Microsoft.ContainerRegistry/registries", "apiVersion": "2017-10-01", "name": "[parameters('registryName')]", "location": "[parameters('location')]", "comments": "Container registry for storing docker images", "tags": { "displayName": "Container Registry", "container.registry": "[parameters('registryName')]" }, "sku": { "name": "Standard", "tier": "Standard" }, "properties": { "adminUserEnabled": false } }, { "type": "Microsoft.ContainerRegistry/registries/tasks/", "name": "[concat(parameters('registryName'), '/', 
parameters('taskName'))]", "location": "[parameters('location')]", "apiVersion": "2019-06-01-preview", "dependsOn": [ "[resourceId('Microsoft.ContainerRegistry/registries', parameters('registryName'))]" ], "properties": { "platform": { "os": "Linux", "architecture": "amd64" }, "step": { "type": "Docker", "imageNames": [ "[variables('imageName')]" ], "dockerFilePath": "Dockerfile", "contextPath": "[parameters('sourceLocation')]", "isPushEnabled": true, "noCache": false }, "trigger": {} } }, { "type": "Microsoft.ContainerRegistry/registries/taskRuns/", "name": "[concat(parameters('registryName'), '/', parameters('taskRunName'))]", "location": "[parameters('location')]", "apiVersion": "2019-06-01-preview", "dependsOn": [ "[resourceId('Microsoft.ContainerRegistry/registries/tasks', parameters('registryName'), parameters('taskName'))]" ], "identity": "[if(not(empty(parameters('userAssignedIdentity'))), variables('identity'), '')]", "properties": { "runRequest": { "type": "TaskRunRequest", "taskName": "[parameters('taskName')]", "taskId": "[resourceId('Microsoft.ContainerRegistry/registries/tasks', parameters('registryName'), parameters('taskName'))]", "values": [], "platform": { "os": "linux", "architecture": "amd64" }, "credentials": {} } } } ] } ================================================ FILE: docs/tasks/run-as-deployment/taskrun/azuredeploy.parameters.json ================================================ { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", "contentVersion": "1.0.0.0", "parameters": { "registryName": { "value": "mytaskrunregistry" }, "taskRunName": { "value": "myquickdockerbuildrun" }, "sourceLocation": { "value": "https://github.com/Azure-Samples/acr-build-helloworld-node.git" } } } ================================================ FILE: docs/tasks/triggers/private-base-image-update.md ================================================ # Track base image update from any Azure Container Registry ACR 
Tasks supports automated builds for when a container's base image is updated.
``` az acr task create \ --registry homeRegistry \ --name hometask \ --image helloworld:v2 \ --context https://github.com/$GIT_USER/acr-build-helloworld-node.git \ --file Dockerfile-app \ --git-access-token $GIT_PAT \ --arg REGISTRY_NAME=baseRegistry.azurecr.io \ --assign-identity ``` ## Give the identity pull permissions to the base registry You must grand the managed identity pull permission to the base image registry. ``` principalID=$(az acr task show --name hometask --registry homeRegistry --query identity.principalId --output tsv) baseregID=$(az acr show --name baseRegistry --query id --output tsv) ``` Only allow the task to pull images from the base image registry. ``` az role assignment create --assignee $principalID --scope $baseregID --role acrpull ``` ## Add target registry credentials to the task Add a custom registry credential to the task so that it can authenticate to the base image registry. ``` az acr task credential add \ --name hometask \ --registry homeRegistry \ --login-server baseRegistry.azurecr.io \ --use-identity [system] ``` ## Manually run the task You must run a task first in order to track the private base image. ``` az acr task run --registry homeRegistry --name hometask ``` ## Update the base image Here you simulate a framework patch in the base image. Edit Dockerfile-base, and add an "a" after the version number defined in NODE_VERSION: `ENV NODE_VERSION 9.11.2a` Now build and push the updated base image to the base registry. ``` az acr build --registry baseRegistry --image baseimages/node:9-alpine --file Dockerfile-base . ``` ## List the updated build ``` az acr task list-runs --registry homeRegistry --name hometask -o table ``` ================================================ FILE: docs/teleport/README.md ================================================ > [!WARNING] > This page is no longer being maintained and will be archived by Tuesday, November 11, 2023. 
Please visit [aka.ms/acr/artifact-streaming](https://aka.ms/acr/artifact-streaming). # Project Teleport Overview (Private Preview) Instancing a custom environment within seconds is one of the many wonders of running containers. Having to wait for the image and its layers to download & decompress the first time is the current price of admission. ***Project Teleport removes the cost of download and decompression by mounting pre-expanded layers from the Azure Container Registry to Teleport enabled Azure container hosts.*** |Dedicated VM|Teleport w/ACR Tasks | |-|-| |![](./media/vm-aci.gif)|![](./media/tasks-aci.gif)| [![](./media/AzureFridayTeleportPreviewThumb.png)](https://channel9.msdn.com/Shows/Azure-Friday/How-to-expedite-container-startup-with-Project-Teleport-and-Azure-Container-Registry/player#time=21s) ![](./media/teleport-metrics.png) > For more background, please see [Azure Container Registry Adds Teleportation][teleport-blog-post] ## Table of Contents - [Sign Up for the Project Teleport Preview](#sign-up-for-the-project-teleport-preview) - [Supported Services](#supported-services) - [Preview Constraints](#preview-constraints) - [Getting Started with Teleportation & AKS](./aks-getting-started.md) - [Getting Support](#getting-support) ## Sign Up for the Project Teleport Preview In these early stages, we're looking for direct feedback. To request access, please sign up here: [aka.ms/teleport/signup][signup] ## Supported Services - ~~Preview 1 focused on running containers within [ACR Tasks][acr-tasks].~~ - Preview 2 focuses on running containers within [AKS][aks]. See: [Getting Started with AKS][aks-getting-started] Additional services and scenarios will come online as we incorporate more feedback. ## Preview Constraints Preview 2 has the following constraints. Your feedback will help us prioritize this list. - 10 Repository limit. 
More info: [teleport-repository-management](./teleport-repository-management.md) - Limited to running images with [AKS][aks-getting-started] - Support for [premium registries][acr-tiers] - Registries must exist in the following regions: | Region | Code | | - | - | | East US | EUS | | East US 2 | EUS2 | | South Central US | SCUS | | West US | WUS | | West US 2 | WUS2 | | West Europe | WEU | - Additional regions, including other continents will come online as we get more feedback. - [Geo-replication](https://aka.ms/acr/geo-replication): Geo-replicated registries are not currently supported. - Linux images are currently supported with Windows images coming in a future release. - [ACR Webhook Push notifications][webhooks] occur when the image manifest and compressed blobs are completed. However, layer expansion will take several additional seconds, depending on the size and quantity of layers. We are considering various options when layer expansion has completed within each region, including regionalized `layer-expanded` notifications and enhancements to `az acr repository show`. For now, a `check-expansion.sh` script is provided. ## Getting Support - [Logging Project Teleport related issues](https://github.com/AzureCR/teleport/issues) - [Feature Requests via User Voice](https://aka.ms/acr/uservoice) - [Contact the ACR Product Team](https://github.com/Azure/acr/blob/master/README.md#providing-feedback) ## How Do I... - **Q:** run some baseline Project Teleport examples - **A:** [Getting Started with AKS][aks-getting-started] - **Q:** know when an image is expanded, and ready for teleportation? - **A:** ACR will support a new notification event, as well as CLI support. For Preview 2, you can run the following script, passing in 3 arguments for each image you'd like to check, with credentials saved to environment variables. 
```sh ./check-expansion.sh [registryName] [repoName] [tag] [optional: --debug] ./check-expansion.sh demo42t hello-world 1.0 --debug ``` - **Q:** know if a repository is enabled for teleportation? - **A:** Project Teleport must be enabled by the ACR Product team. To verify a repository is enabled, use `az acr repository show`, looking for the `teleportEnabled` attribute. ```sh az acr repository show \ --repository azure-vote-front \ -o jsonc { "changeableAttributes": { "deleteEnabled": true, "listEnabled": true, "readEnabled": true, "teleportEnabled": true, "writeEnabled": true ``` [acr-import]: https://aka.ms/acr/import [acr-tiers]: https://aka.ms/acr/tiers [aks]: https://azure.microsoft.com/services/kubernetes-service/ [aks-getting-started]: ./aks-getting-started.md [cloud-shell]: https://shell.azure.com [signup]: https://aka.ms/teleport/signup [support]: https://github.com/azurecr/teleport/blob/master/README.md#getting-support [teleport-blog-post]: https://stevelasker.blog/2019/10/29/azure-container-registry-teleportation/ [acr-tasks]: https://aka.ms/acr/tasks [webhooks]: https://docs.microsoft.com/en-us/azure/container-registry/container-registry-webhook ================================================ FILE: docs/teleport/aks-getting-started.md ================================================ > [!WARNING] > This page is no longer being maintained and will be archived by Tuesday, November 11, 2023. Please visit [aka.ms/acr/artifact-streaming](https://aka.ms/acr/artifact-streaming). # Integrate Azure Container Registry and Project Teleport with Azure Kubernetes Service (preview) [Project Teleport][project-teleport] allows container hosts to access pre-expanded layers within an [Azure Container Registry (ACR)][acr] that is in the same region as the container host. Using pre-expanded layers removes the time for compute and memory to decompress layers that are already available within the Azure network. 
Removing this decompression also reduces the time to create the instance of the running container. Using Project Teleport with ACR and AKS is in private preview. See [Enable Project Teleport on an ACR](#enable-project-teleport-on-an-acr) for access. > AKS preview features are available on a self-service, opt-in basis. Previews are provided "as is" and "as available," and they're excluded from the service-level agreements and limited warranty. AKS previews are partially covered by customer support on a best-effort basis. As such, these features aren't meant for production use. AKS preview features aren't available in Azure Government or Azure China 21Vianet clouds. For more information, see the following support articles: > > - [AKS support policies][aks-support-policies] > - [Azure support FAQ][aks-support-faq] > > ACR Preview features are available on a self-service, opt-in basis. Previews are provided "as is" and "as available," and they're excluded from the service-level agreements and limited warranty. ACR previews are partially covered by customer support on a best-effort basis. As such, these features aren't meant for production use. ACR preview features aren't available in Azure Government or Azure China 21Vianet clouds. > - Project Teleport Support is provided through a [Microsoft Teams channel][acr-teleport-red-shirts]. [Requesting access to Project Teleport][teleport-signup-form] ## Prerequisites * Ensure you have the Azure CLI, version 2.13.0 or greater installed. * Ensure you have the `aks-preview` CLI extension 0.4.73 or greater installed. * Ensure you have Project Teleport enabled on your ACR. * Ensure you have the AKS `EnableACRTeleport` feature flag under `Microsoft.ContainerService` enabled. ## Preview Limitations * AKS node pools must use Kubernetes 1.19.7 or greater. * AKS node pools must use `containerd` as the container runtime. AKS clusters with node pools using Kubernetes before 1.19.0 use Moby as the container runtime. 
* Each ACR used must have Project Teleport enabled. * Each ACR must use the [*Premium* Tier][acr-tiers]. * ACR and AKS must be in the same region. See [Project Teleport supported regions][teleport-regions]. This is less of a limitation, rather a design constraint. It's always a best practice to have the content required for deployment to be within the same region. Project Teleport depends on this best practice to mount layers within an Azure network regional boundary. * Linux containers on AKS clusters are supported. Windows support is not yet available. * Enabling Project Teleport on an existing registry will not convert images already in the registry. To expand existing content, pull and push the image to trigger expansion. _In a future release, enabling Project Teleport on a repository will convert the images. This work is not yet complete._ * ACR Geo-replicated registries are not currently supported on Project Teleport enabled registries. * [Private Links](https://aka.ms/acr/privatelink) are not currently supported on Project Teleport enabled registries. ### Enable Project Teleport on an ACR Create a [premium instance][acr-tiers] of Azure Container Registry in one of the [Project Teleport supported regions][teleport-regions]. Sign up for ACR Project Teleport using [the signup form][teleport-signup-form], providing the resource ID of a **Premium Tier** ACR instance. Once Project Teleport is enabled, you'll receive an confirmation email. #### Set environment variables Configure variables unique to your environment. ```azurecli-interactive AKS=myaks AKS_RG=${AKS}-rg ACR=myacr LOCATION=westus2 K8S_VERSION=1.19.7 ACR_URL=${ACR}.azurecr.io ``` #### Confirming an ACR repository is set to expand Once you receive a confirmation email, push an image to a new repository. You may also use `az acr import` to copy an image from another registry. 
To confirm an ACR repository has been configured for teleport expansion, use the following command, replacing `$ACR` and `` with your acr and image name. ```azurecli az acr import \ --source mcr.microsoft.com/azuredocs/azure-vote-front:v1 \ --name $ACR \ --image azure-vote-front:v1 az acr import \ --source mcr.microsoft.com/oss/bitnami/redis:6.0.8 \ --name $ACR \ --image redis:6.0.8 az acr repository show -n ${ACR} -o jsonc \ --repository azure-vote-front ``` The following example output shows *"teleportEnabled": true*, verifying Project Teleport is enabled on your ACR. ```console { "changeableAttributes": { "deleteEnabled": true, "listEnabled": true, "readEnabled": true, "teleportEnabled": true, "writeEnabled": true }, ... } ``` #### Confirming an image has been expanded At this point in the Teleport preview, check image expansion using the [check-expansion.sh][acr-check-expansion] script. As the script uses a `/mount` api, basic auth is required. An [ACR Token](https://aka.ms/acr/tokens) is created and saved as environment variable. > Note: Assure [check-expansion.sh](./check-expansion.sh) is set to execute: `sudo chmod +x check-expansion.sh` ```azurecli-interactive export ACR_USER=teleport-token export ACR_PWD=$(az acr token create \ --name teleport-token \ --registry $ACR \ --scope-map _repositories_pull \ --query credentials.passwords[0].value -o tsv) ./check-expansion.sh ${ACR} # example: ./check-expansion.sh myacr azure-vote-front v1 ``` ### Install aks-preview CLI extension To use Project Teleport with ACR and AKS, you need version 0.4.73, or greater, of the *aks-preview* CLI extension. 
Install the *aks-preview* Azure CLI extension using the [az extension add][az-extension-add] command, or install any available updates using the [az extension update][az-extension-update] command: ```azurecli-interactive # Check the current Azure CLI version az version # Check the current aks-preview extension version az extension list # Install the aks-preview extension az extension add --name aks-preview # Update the extension to make sure you have the latest version installed az extension update --name aks-preview ``` ### Register the AKS `EnableACRTeleport` preview feature To use Teleport with ACR and AKS, you must enable the `EnableACRTeleport` feature flag on your subscription. This feature flag provisions the teleportd client to AKS nodes. Register the AKS `EnableACRTeleport` feature flag using the [az feature register][az-feature-register] command as shown in the following example: ```azurecli-interactive az feature register \ --namespace "Microsoft.ContainerService" \ --name "EnableACRTeleport" ``` It may take 15 minutes or more to complete registration. You can check on the registration status using the [az feature list][az-feature-list] command: ```azurecli-interactive az feature list \ --query "[?contains(name, 'Microsoft.ContainerService/EnableACRTeleport')].{Name:name,State:properties.state}" \ -o table ``` When ready, refresh the registration of the *Microsoft.ContainerService* resource provider using the [az provider register][az-provider-register] command: ```azurecli-interactive az provider register --namespace Microsoft.ContainerService ``` ## Create a new AKS cluster with Teleport enabled To use Teleport with ACR and AKS on a new cluster, create a new AKS cluster and specify your ACR with Project Teleport enabled as well as the `EnableACRTeleport=true` custom header. 
```azurecli-interactive # Create the AKS Cluster, attached to ACR, with Project Teleport Enabled az aks create \ --generate-ssh-keys \ -g ${AKS_RG} \ -n ${AKS} \ --attach-acr $ACR \ --kubernetes-version ${K8S_VERSION} \ -l $LOCATION \ --aks-custom-headers EnableACRTeleport=true az aks get-credentials \ -g ${AKS_RG} \ -n ${AKS} ``` ## Add a Project Teleport enabled node pool to an _existing_ AKS cluster To use Project Teleport on an existing AKS cluster, add a node pool to your cluster and set the `EnableACRTeleport=true` custom header. ```azurecli az aks nodepool add \ --name teleportpool \ --cluster-name ${AKS} \ --resource-group ${AKS_RG} \ --kubernetes-version ${K8S_VERSION} \ --aks-custom-headers EnableACRTeleport=true ``` If your AKS cluster doesn't have access to your Project Teleport enabled ACR, attach it. ```azurecli az aks update \ -n ${AKS} \ -g ${AKS_RG} \ --attach-acr ${ACR} ``` ## Verify your cluster has Project Teleport enabled When your cluster has a node pool with Project Teleport enabled, any nodes in that node pool have the `kubernetes.azure.com/enable-acr-teleport-plugin:true` label. You can target this label with a node selector when running pods on your cluster to have specific applications take advantage of Project Teleport. To show the labels on your nodes, get the credentials for your cluster and use `kubectl` to show your nodes. ```azurecli az aks get-credentials -g ${AKS_RG} -n ${AKS} kubectl get nodes ``` Use `kubectl` to get the details of a specific node. Confirm the `kubernetes.azure.com/enable-acr-teleport-plugin:true` label appears in the node details. Note: the full name of the nodepool is not required. Only enough of the name to be unique is required. ```azurecli kubectl describe node aks-nodepool1 ``` The following example output shows the `kubernetes.azure.com/enable-acr-teleport-plugin:true` label in the node details. ```console $ kubectl describe node aks-nodepool1-00000000-vmss000000 ... 
Name: aks-nodepool1-00000000-vmss000000 Roles: agent Labels: agentpool=nodepool1 ... kubernetes.azure.com/enable-acr-teleport-plugin=true ... ``` ## Next steps - [Deploy two nodes, one with Project Teleport, one without to see the start time differences.](./aks-teleport-comparison.md) For more information about pushing an image into your ACR, see [Push your first image to a private container registry using the Docker CLI][acr-push]. For more information about importing images into your ACR, see [Import container images to a container registry][acr-import]. [acr]: https://aka.ms/acr [acr-check-expansion]: ./check-expansion.sh [acr-import]: https://aka.ms/acr/import [acr-teleport-red-shirts]: https://aka.ms/acr/teleport/red-shirts [acr-tiers]: https://aka.ms/acr/tiers [acr-push]: https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-docker-cli [az-extension-add]: /cli/azure/extension#az-extension-add [az-extension-update]: /cli/azure/extension#az-extension-update [az-feature-list]: /cli/azure/feature#az-feature-list [az-feature-register]: /cli/azure/feature#az-feature-register [az-provider-register]: /cli/azure/provider#az-provider-register [teleport-signup-form]: https://aka.ms/acr/teleport/signup [project-teleport]: https://github.com/azurecr/teleport [teleport-regions]: ./README.md#preview-constraints [aks-support-policies]: https://docs.microsoft.com/azure/aks/support-policies [aks-support-faq]: https://docs.microsoft.com/en-us/azure/aks/faq ================================================ FILE: docs/teleport/aks-teleport-comparison.md ================================================ > [!WARNING] > This page is no longer being maintained and will be archived by Tuesday, November 11, 2023. Please visit [aka.ms/acr/artifact-streaming](https://aka.ms/acr/artifact-streaming). 
# Comparing Azure Container Registry Project Teleport with standard docker pull, using Azure Kubernetes Service To compare the performance benefits of Project Teleport two deployments will be made allowing the same image to be deployed to an AKS node with Project Teleport, and another without Project Teleport, allowing node recycling to reset the nodes, clearing any cached images. Project Teleport is node specific. If an image is pulled to a node, with teleport enabled, the expanded layers are mounted. If a second copy of the same image is pulled to the same node, even if pulled from Project Teleport expanded repository, the node will identify common layers and mount the local layers from the previously pulled image. To avoid layer sharing, testing the same image with and without Project Teleport enabled, two additional nodepools will be created. The additional nodepools will enable clearing any cached images and layers by scaling the nodepool to zero, then back to one. When complete, the AKS cluster will have (3) nodepools: - `nodepool1` - The system nodepool. No workloads will be scheduled here. - `teleporter` - A Project Teleport enabled nodepool, with a single node - `shuttle` - The standard method of transport of container images, with a single node. This tutorial assumes you've already completed the steps to create a Teleport enabled AKS Cluster, and Teleport enabled ACR Instance. If you haven't already done so, complete the steps in: [Integrate Azure Container Registry and Project Teleport with Azure Kubernetes Service](./aks-getting-started.md) ## Set environment variables Configure variables unique to your environment. Note the ACR and AKS instances must both be in one of the [teleport supported regions][teleport-regions]. 
```azurecli-interactive AKS=myaks AKS_RG=${AKS}-rg LOCATION=westus2 K8S_VERSION=1.19.7 ACR=myacr ACR_URL=${ACR}.azurecr.io ``` ## Clone the Teleport samples repo Sample kubernetes deployment files and a `check-expansion.sh` script are provided at: https://github.com/Azure/acr/tree/main/docs/teleport. Only a few files are necessary. You may either `git clone` the repo, or copy the individual files referenced below. ```bash git clone https://github.com/Azure/acr.git ``` ## Import images for teleportation For completeness of this walkthrough, the azure-vote application is used, which includes a 944mb `azure-vote-front:v1` image. To expand the layers, import the images into a teleport enabled registry, in the same region as the AKS cluster. ```azurecli-interactive az acr import \ --source mcr.microsoft.com/azuredocs/azure-vote-front:v1 \ --name $ACR \ --image azure-vote-front:v1 az acr import \ --source mcr.microsoft.com/oss/bitnami/redis:6.0.8 \ --name $ACR \ --image redis:6.0.8 ``` ## Confirm import and teleport expansion Confirm the `azure-vote-front` repository is set for teleport expansion: ```azurecli az acr repository show \ --repository azure-vote-front \ -o jsonc ``` Look for `"teleportEnabled": true,` in the output ```json "changeableAttributes": { "deleteEnabled": true, "listEnabled": true, "readEnabled": true, "teleportEnabled": true, "writeEnabled": true } ``` Although the repository is configured for teleport expansion, each image upload will take time to be expanded on push. The length of time is based on the quantity and size of layers, however the expansion should be completed within seconds. > **Note:** ACR webhooks indicate when an artifact is pushed and available. The push event occurs prior to layer expansion. In a future release, a new webhook and EventGrid notification will be added indicating the image has been expanded, and ready for teleportation. 
At this point in the Teleport preview, check expansion using the [check-expansion.sh][acr-check-expansion] script. As the script uses a `/mount` api, basic auth is required. An [ACR Token](https://aka.ms/acr/tokens) is created and saved as environment variables. > Note: If `check-expansion.sh` fails to execute (`-bash: ./check-expansion.sh: Permission denied`), assure the file is set to executable: `sudo chmod +x ./check-expansion.sh` ```azurecli-interactive export ACR_USER=teleport-token export ACR_PWD=$(az acr token create \ --name teleport-token \ --registry $ACR \ --scope-map _repositories_pull \ --query credentials.passwords[0].value -o tsv) ./check-expansion.sh teleport azure-vote-front v1 ``` ## Add nodes for teleporters and shuttles Two nodepools will be added to enable teleportation, and a comparison for standard transport. An `acr-teleport` label is added for scheduling onto specific nodes. The system nodepool (`nodepool1`) is avoided to enable [clearing the image cache](#cleanup) by scaling the nodepool to zero then back to one. ```azurecli-interactive az aks nodepool add \ --resource-group $AKS_RG \ --cluster-name $AKS \ --name teleporter \ --node-count 1 \ --aks-custom-headers EnableACRTeleport=true \ --labels acr-teleport=enabled az aks nodepool add \ --resource-group $AKS_RG \ --cluster-name $AKS \ --name shuttle \ --node-count 1 \ --labels acr-teleport=disabled ``` ## Deploy to AKS Update the [azure-vote-teleport.yaml](./samples/azure-vote-teleport.yaml) and [azure-vote-shuttle.yaml](./samples/azure-vote-shuttle.yaml) files to reference your registry name: ```yml spec: ... containers: - name: azure-vote-back image: .azurecr.io/redis:6.0.8 ... containers: - name: azure-vote-front image: .azurecr.io/azure-vote-front:v1 ``` ### Deploy with standard pull performance Deploy the _shuttle_ podspec: ```azurecli-interactive kubectl apply -f azure-vote-shuttle.yaml ``` Get the list of pods to find the azure-vote-front pod. 
The shorthand version can be used if only one pod is named `azure-vote-front`. You may need to run the command a few times until the image has been pulled and expanded on the node. ```azurecli-interactive kubectl get pods kubectl describe pod azure-vote-front-shuttle ``` Under the `events` list, an entry for `Successfully pulled image...` provides the pull time. Note the length before proceeding to the teleport version. ```bash Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 36s default-scheduler Successfully assigned default/azure-vote-front-5c976dbbd9-tckdz to aks-shuttle-10583637-vmss000000 Normal Pulling 35s kubelet Pulling image ".azurecr.io/azure-vote-front:v1" Normal Pulled 1s kubelet Successfully pulled image ".azurecr.io/azure-vote-front:v1" in 34.738162s ``` If `already present on machine` is returned, this indicates the image was previously pulled and cached. See [recycle nodepool](#cleanup) to clear the cache. ```bash Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 2m12s default-scheduler Successfully assigned default/azure-vote-front-5bdfc85f9c-d7z8b to aks-shuttle-10583637-vmss000000 Normal Pulled 2m11s kubelet Container image ".azurecr.io/azure-vote-front:v1" already present on machine ``` ### Deploy with Teleport performance Deploy the _teleport_ podspec: ```azurecli-interactive kubectl apply -f azure-vote-teleport.yaml ``` Get the list of pods to find the azure-vote-front pod. The shorthand version can be used if only one pod is named `azure-vote-front`. You may need to run the command a few times until the image has been pulled and expanded on the node. ```azurecli-interactive kubectl describe pod azure-vote-front-teleport ``` Under the `events` list, an entry for `Successfully pulled image...` provides the pull time. Note the length should be dramatically faster. 
```bash Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 12s default-scheduler Successfully assigned default/azure-vote-front-teleport-5bf865d976-lm4bj to aks-teleporter-10583637-vmss000000 Normal Pulling 11s kubelet Pulling image ".azurecr.io/azure-vote-front:v1" Normal Pulled 3s kubelet Successfully pulled image ".azurecr.io/azure-vote-front:v1" in 8.011045191s Normal Created 3s kubelet Created container azure-vote-front-teleport Normal Started 3s kubelet Started container azure-vote-front-teleport ``` ### Browse the apps To view the voting apps: ```azurecli-interactive kubectl get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE azure-vote-back-shuttle ClusterIP 10.0.159.177 6379/TCP 15m azure-vote-back-teleport ClusterIP 10.0.229.255 6379/TCP 21m azure-vote-front-shuttle LoadBalancer 10.0.155.50 x.x.x.x 80:30276/TCP 15m azure-vote-front-teleport LoadBalancer 10.0.60.1 x.x.x.x 80:30259/TCP 21m kubernetes ClusterIP 10.0.0.1 443/TCP 118m ``` Copy the EXTERNAL-IP, and paste in the browser to view each voting app. ### Cleanup To reset the nodes, delete the two deployments: ```azurecli-interactive kubectl delete -f azure-vote-teleport.yaml kubectl delete -f azure-vote-shuttle.yaml ``` Clear the image cache, and any teleport mounts: ```azurecli-interactive az aks scale \ --resource-group $AKS_RG \ --name $AKS \ --nodepool-name teleporter \ --node-count 0 az aks scale \ --resource-group $AKS_RG \ --name $AKS \ --nodepool-name teleporter \ --node-count 1 az aks scale \ --resource-group $AKS_RG \ --name $AKS \ --nodepool-name shuttle \ --node-count 0 az aks scale \ --resource-group $AKS_RG \ --name $AKS \ --nodepool-name shuttle \ --node-count 1 ``` ## Performance profile of teleport The Voting app has a significant performance delta, comparing a normal pull time of 34.7 seconds, with 8.0 seconds of teleport. While impressive, you may have assumed a larger difference. Teleport prototype-1 is based on mounting expanded layers. 
For each layer of an image, the decompression of a layer is traded off for a mount. Therefore, the larger the layer count, the slightly longer the start time. The `azure-vote-front` image has ***29 layers***, which requires 29 mounts. When pulling the image without teleport, the 944mb of content must be decompressed, but multiple decompression threads can run concurrently. ```bash docker inspect .azurecr.io/azure-vote-front:v1 ... "RootFS": { "Type": "layers", "Layers": [ "sha256:e27a10675c5656bafb7bfa9e4631e871499af0a5ddfda3cebc0ac401dfe19382", "sha256:851f3e348c69d8959d326f0bab975c03f9813eec33aba389aa7c569953510433", "sha256:06f4de5fefeae30802d336e8c234b9c0989542fb80efd4f83be06c41aba26d9f", "sha256:b31411566900643c38169980a21093c23e0a12a12ffea78b1921d07dd40372bd", "sha256:6662dddae6aa455371366ed12400556a29e049373ea27c089a24634e3098cb48", "sha256:4ea12feed6a9386d7bdac8b26073b1209f0f39781a5d157026dfb5a918c95db7", "sha256:e7cee6196d865755606c73b82004784273cd423217cba8faf650b6707d3b5059", "sha256:b15d32f8b6aa975b8be84e825952094d2f20296777a2bb5fad3fb270ca05a776", "sha256:5e8efc7c6f4fee7fecfc685b742293a5300cc3180262a144a2fed54c46597129", "sha256:4f6e0a34a0535f5cd6b76d06aae861c3ced179b3b115d2850af0e2f0bcefeffc", "sha256:5ac43729c58be5ecb0fc13b164fb4f06f0afc13394735e8ac10cdb0f75311195", "sha256:491bd929c5bfcb0639a6c43d07be0aac225dc0da28379e99f617480599825e5f", "sha256:b18e79d2742360b7b0d81493e8a8beced51953c8a8f73fe4b228e47e8aeb292b", "sha256:55ebcfb2ad17cafdada768b6ca43e3f4e51bc589757b22337b94a499354aa052", "sha256:1a350e9420b7eac6b50172334afd6354d89749c62822951596bac9085cb9fb1e", "sha256:7b3929993879466a3abee028d3fb490d83c211ab5723e29765ed17c98db5b4e3", "sha256:ddeb470c209923815a410d35dd45a6710bc285955b5ef30d92a003d38bd68f3b", "sha256:c30da5f5d23cec0997d05337dd1113872ba56b38a59bf96922572f07d65b94a0", "sha256:a7364327f2826e4991e3675271350c9e7b858e33abbe77aacfbbff00a4b59455", "sha256:f8af872e501840a2de13260830960f612560d9ee755ae07a37c30758e8568444", 
"sha256:57fe04427c69233864b729d60d3c9c7fe8a43e950cb442a650101a357998c8c2", "sha256:cde1a4e95d8bea636972733fde8f223d1dda2c2425ec7de8ca5b078391723c11", "sha256:efa870440d9c6defc6447ad9d3d214312ba3dc0c665c723b793d14d241d811e1", "sha256:a9f64da753644ba8e18846cb23010c8e730de34701b5f519591167722a89784b", "sha256:2131d41261d2d13cae8b024c5a20e65fe8ee8f98d04bfd238124210b94115d69", "sha256:9d93163e41ffdad4659db82f267091b4a478f1235be1b25438407e79e80ed28b", "sha256:d9aeb057eef2070b1260cceeefb0933755f62504cf34efe2bf4f113043bf7493", "sha256:5e85a99d34e4a9aea5bdc845fb30587b172393ebf7d71ddbb1b325e3fa728090", "sha256:ab48c9fa73df063cfafaa0338c06ec44ba3d29a3ce6adde3fedf42d2d0c0ee91" ] ``` ### Comparing fewer layers To gauge the difference between layers and size, clone the [Azure-Samples/azure-voting-app-redis](https://github.com/Azure-Samples/azure-voting-app-redis) repo and build the image with the `--squash` flag. ```bash docker build --force-rm --squash -t ${ACR}.azurecr.io/azure-vote-front:squashed . docker push ${ACR}.azurecr.io/azure-vote-front:squashed ``` - Change the `:tag` references in `azure-vote-shuttle.yaml` and `azure-vote-teleport.yaml` files to reference the `:squashed` tag. ```yaml containers: - name: azure-vote-front-teleport image: .azurecr.io/azure-vote-front:squashed ``` - Follow the steps above to [recycle](#cleanup) the nodes, and [redeploy the two apps](#deploy-with-standard-pull-performance). The resulting times should reflect **~31.7 seconds** for the standard docker pull/decompress and **~1.9 seconds** for teleportation of a single layer. | Size | Layers | Docker | Teleport| |-|-|-|-| | 944mb | 28 | 34.7 | 8 | | 929mb | 1 | 31.7 | 1.9 | That's a _further_ reduction from 8.0 seconds for 28 layers to 1.9 seconds for a single layer. This highlights the performance profile of mounting expanded layers, compared to pulling and decompressing layers. 
#### Shuttle deployed single layer image ``` Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 9m55s default-scheduler Successfully assigned default/azure-vote-front-6ff785596-4d62g to aks-shuttle-10583637-vmss000000 Normal Pulling 9m54s kubelet Pulling image ".azurecr.io/azure-vote-front:squashed" Normal Pulled 9m22s kubelet Successfully pulled image ".azurecr.io/azure-vote-front:squashed" in 31.711601788s ``` #### Teleported single layer image ``` Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 37s default-scheduler Successfully assigned default/azure-vote-front-teleport-5fbc9b754f-lrtpw to aks-teleporter-10583637-vmss000000 Normal Pulling 36s kubelet Pulling image ".azurecr.io/azure-vote-front:squashed" Normal Pulled 34s kubelet Successfully pulled image ".azurecr.io/azure-vote-front:squashed" in 1.891985507s ``` ### Balancing layers and size While you might consider flattening your images to one layer for fast mounting, you may have contention on a single mount point. The purpose of the Project Teleport preview is to get further metrics on the usage to understand the art and science of image layers. The Project Teleport design does not require an image owner to make changes to use Teleport. Teleport works with your existing container images. However, with each technology, there are always optimizations that may be made based on the deployment target. One thing is always common about image performance. The smaller you can make your overall container image, the faster it will run. 
## Providing feedback Please contact the Project Teleport technicians, and other fellow Teleport Red-Shirts in the [Teleport Red-Shirts teams channel][acr-teleport-red-shirts] [acr-check-expansion]: ./check-expansion.sh [acr-teleport-red-shirts]: https://aka.ms/acr/teleport/red-shirts [teleport-regions]: ./README.md#preview-constraints ================================================ FILE: docs/teleport/check-expansion.sh ================================================ #!/bin/bash #usage: check-expansion.sh acr-name repo tag #usage: eg: check-expansion.sh demo42 /demo42/hello-world 2.1 #usage: eg: check-expansion.sh demo42 /demo42/hello-world 2.1 --debug # Assumes ACR_USER and ACR_PWD are set to valid ACRPULL role # Retrieve the ACR_PWD with the following command, if the Admin account is enabled # ACR_PWD="$(az acr credential show -n demo42t --query passwords[0].value -o tsv)" # NOTE: Repo scoped tokens will be coming online in November ACR_NAME=$1 ACR_REPO=$2 ACR_TAG=$3 DEBUG=$4 # Troubleshooting if [[ $DEBUG = '--debug' ]]; then echo "Parameter Validation:" echo " ACR_USER: ${ACR_USER}" echo " ACR_PWD : ${ACR_PWD}" echo " ACR_NAME: ${ACR_NAME}" echo " ACR_REPO: ${ACR_REPO}" echo " ACR_TAG : ${ACR_TAG}" fi echo "Getting Access Token" ACR_ACCESS_TOKEN=$(curl -s -u ${ACR_USER}:${ACR_PWD} "https://${ACR_NAME}.azurecr.io/oauth2/token?service=${ACR_NAME}.azurecr.io&scope=repository:${ACR_REPO}:pull" | sed -e 's/[{}]/''/g' | awk -v RS=',"' -F: '/access_token/ {print $2}' | sed 's/^.//;s/.$//') if [[ $ACR_ACCESS_TOKEN == '' ]]; then echo "Could not get access token, make sure credentials are accurate and have pull access" exit 1 fi if [[ $DEBUG == '--debug' ]]; then echo " ACR_ACCESS_TOKEN: ${ACR_ACCESS_TOKEN}" fi echo "Finding Digest for $ACR_REPO:$ACR_TAG https://$ACR_NAME.azurecr.io/v2/$ACR_REPO/manifests/$ACR_TAG" ACR_DIGEST=$(curl -s -L -I \ -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \ -H "Authorization: Bearer ${ACR_ACCESS_TOKEN}" \ 
https://$ACR_NAME.azurecr.io/v2/$ACR_REPO/manifests/$ACR_TAG | grep ^Docker-Content-Digest | awk '{print $2}' | head -c 71) if [[ $DEBUG == '--debug' ]]; then echo " ACR_DIGEST: ${ACR_DIGEST}" fi if [[ $ACR_DIGEST == '' ]]; then echo "Could not return the image digest. Confirm the identity has access to the repo, and the image exists." exit 1 fi echo "Checking https://$ACR_NAME.azurecr.io/mount/v1/$ACR_REPO/_manifests/$ACR_DIGEST" while true do STATUS=$(curl -s -o /dev/null -w "%{http_code}" \ -H "Authorization: Bearer ${ACR_ACCESS_TOKEN}" \ "https://$ACR_NAME.azurecr.io/mount/v1/$ACR_REPO/_manifests/$ACR_DIGEST") echo "Status: ${STATUS}" if [ $STATUS -eq 200 ]; then echo "Teleport: layers ready" break elif [ $STATUS -eq 409 ]; then echo "Teleport: expanding layers" elif [ $STATUS -eq 404 ]; then echo "Teleport: ${ACR_NAME}-${ACR_REPO}:${ACR_TAG} not enabled" break else echo "Unknown status $STATUS" fi sleep 2 done ================================================ FILE: docs/teleport/collecting-teleportd-logs-aks.md ================================================ > [!WARNING] > This page is no longer being maintained and will be archived by Tuesday, November 11, 2023. Please visit [aka.ms/acr/artifact-streaming](https://aka.ms/acr/artifact-streaming). # Collecting Logs for Teleport on AKS This guide goes over how to collect logs for the teleportd daemon running in an AKS cluster. The steps in this guide have to be carried out by customers in order to collect log information for debugging purposes. ## Teleportd logs We can get teleportd logs from each node independently to verify that a teleport enabled image succesfully teleported. Because Teleportd is run as a daemon in the node, its logs are only available from journald in a node. To get these a user can either connect to their node which ran the specific pod being debugged (using ssh or lens) and run `journalctl -n all -u teleportd`. 
Alternatively you can collect logs by creating sidecar container on the corresponding nodes like we do. The sidecar mounts the filesystem of the nodepool and uses this to obtain journald logs. These are created using the following configuration: ```yaml apiVersion: v1 kind: Pod metadata: name: teleport-logs spec: containers: - name: log-reader image: busybox args: [/bin/sh, -c, '/bin/journalctl -n all -u teleportd -f'] volumeMounts: - name: rootfs mountPath: / # Add if customer needs to specify node # nodeSelector: # teleport: "true" volumes: - name: rootfs hostPath: path: / type: Directory ``` You can run a pod with the above configuration (make sure to edit the nodeSelector field to set the log collection to the specific node that needs its instance of teleportd to be debugged) and then get the logs from it by calling: `kubectl logs teleport-logs > ./teleport-daemon.log` ## Other options ### Kubernetes Events (Aside) If the affected pod was just ran (events have a short timespan) you can use the following to gather some extra information and even confirm the image teleported: For event collection we are looking at two methods: - Running `kubectl describe pod ` Teleportd events try to associate with the pod that first pulled a specific image within a node, nonetheless this can fail, in such a scenario events are not associated with a node but are still reported as general events and will still be visible when running kubectl get events. If everything goes rights the output of this command will include the teleport events, otherwise: - Running `kubectl get events` In cases when teleportd fails to associate events with a pod or when multiple pods experienced issues, check all the kubectl get events, they are all sourced from the teleportd client and are marked as such, some will have the image and tag that was teleport or if it failed to teleport. 
Events include overall failure information for teleportd but do not currently give information on individual layer failures. If an image took too long to teleport for example and the events still indicate success there could be individual layer mount failures, refer to the logs in that scenario. ================================================ FILE: docs/teleport/edit-teleport-attribute.sh ================================================ #!/bin/bash #usage: edit-teleport-attribute.sh acr-name repo enable #usage: eg: edit-teleport-attribute.sh demo42 /demo42/hello-world 2.1 disable #usage: eg: edit-teleport-attribute.sh demo42 /demo42/hello-world 2.1 enable --debug # Assumes ACR_USER and ACR_PWD are set to valid ACRPULL role # Retrieve the ACR_PWD with the following command, if the Admin account is enabled # ACR_PWD="$(az acr credential show -n demo42t --query passwords[0].value -o tsv)" # NOTE: Repo scoped tokens will be coming online in November ACR_NAME=$1 ACR_REPO=$2 STATE=$3 DEBUG=$4 # Troubleshooting if [[ $DEBUG = '--debug' ]]; then echo "Parameter Validation:" echo " ACR_USER: ${ACR_USER}" echo " ACR_PWD : ${ACR_PWD}" echo " ACR_NAME: ${ACR_NAME}" echo " ACR_REPO: ${ACR_REPO}" fi echo "Getting Access Token" ACR_ACCESS_TOKEN=$(curl -s -u ${ACR_USER}:${ACR_PWD} "https://${ACR_NAME}.azurecr.io/oauth2/token?service=${ACR_NAME}.azurecr.io&scope=repository:${ACR_REPO}:pull,push" | sed -e 's/[{}]/''/g' | awk -v RS=',"' -F: '/access_token/ {print $2}' | sed 's/^.//;s/.$//') if [[ $ACR_ACCESS_TOKEN == '' ]]; then echo "Could not get access token, make sure credentials are accurate and have pull access" exit 1 fi if [[ $DEBUG == '--debug' ]]; then echo " ACR_ACCESS_TOKEN: ${ACR_ACCESS_TOKEN}" fi SET_STATE="" if [[ $STATE == 'enable' ]]; then SET_STATE="{\"teleportEnabled\": true }" fi if [[ $STATE == 'disable' ]]; then SET_STATE="{\"teleportEnabled\": false }" fi if [[ $DEBUG == '--debug' ]]; then echo " SET_STATE: ${SET_STATE}" fi echo "Sendng Patch Request for 
$ACR_REPO" RESULT=$(curl -s -S \ -H "Content-Type: application/json" \ -H "Authorization: Bearer ${ACR_ACCESS_TOKEN}" \ --request PATCH \ --data "${SET_STATE}" \ https://$ACR_NAME.azurecr.io/acr/v1/$ACR_REPO) echo " RESULT: ${RESULT}" ================================================ FILE: docs/teleport/find-teleport-enabled-repositories.sh ================================================ #!/bin/bash # Prerequisites: # azure cli (logged in) # jq # usage: find-teleport-enabled-repositories.sh acr-name ACR_NAME=$1 enabled_repos=() IFS=$'\n' # Each iteration of the for loop should read until we find an end-of-line for row in $(az acr repository list --name ${ACR_NAME} | jq '.[]' | jq @sh) do # Run the row through the shell interpreter to remove enclosing double-quotes stripped=$(echo $row | xargs echo) stripped=$(echo $stripped | xargs echo) is_teleport_enabled=$(az acr repository show --name "${ACR_NAME}" --repository "${stripped}" --query "changeableAttributes.teleportEnabled") if [[ "$is_teleport_enabled" = 'true' ]]; then echo "$stripped -> Enabled" enabled_repos+=("$stripped") else echo "$stripped -> Disabled" fi done unset IFS echo "" echo "Summary:" echo "Enabled Repositories:" echo "" for value in "${enabled_repos[@]}" do echo $value done ================================================ FILE: docs/teleport/samples/azure-vote-shuttle.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: azure-vote-back-shuttle spec: replicas: 1 selector: matchLabels: app: azure-vote-back-shuttle template: metadata: labels: app: azure-vote-back-shuttle spec: nodeSelector: "beta.kubernetes.io/os": linux "acr-teleport": "disabled" containers: - name: azure-vote-back-shuttle image: .azurecr.io/redis:6.0.8 env: - name: ALLOW_EMPTY_PASSWORD value: "yes" resources: requests: cpu: 100m memory: 128Mi limits: cpu: 250m memory: 256Mi ports: - containerPort: 6379 name: redis --- apiVersion: v1 kind: Service metadata: name: 
azure-vote-back-shuttle spec: ports: - port: 6379 selector: app: azure-vote-back-shuttle --- apiVersion: apps/v1 kind: Deployment metadata: name: azure-vote-front-shuttle spec: replicas: 1 selector: matchLabels: app: azure-vote-front-shuttle template: metadata: labels: app: azure-vote-front-shuttle spec: nodeSelector: "beta.kubernetes.io/os": linux "acr-teleport": disabled containers: - name: azure-vote-front-shuttle image: .azurecr.io/azure-vote-front:v1 resources: requests: cpu: 100m memory: 128Mi limits: cpu: 250m memory: 256Mi ports: - containerPort: 80 env: - name: REDIS value: "azure-vote-back-shuttle" --- apiVersion: v1 kind: Service metadata: name: azure-vote-front-shuttle spec: type: LoadBalancer ports: - port: 80 selector: app: azure-vote-front-shuttle ================================================ FILE: docs/teleport/samples/azure-vote-teleport.yaml ================================================ apiVersion: apps/v1 kind: Deployment metadata: name: azure-vote-back-teleport spec: replicas: 1 selector: matchLabels: app: azure-vote-back-teleport template: metadata: labels: app: azure-vote-back-teleport spec: nodeSelector: "beta.kubernetes.io/os": linux "acr-teleport": enabled containers: - name: azure-vote-back-teleport image: .azurecr.io/redis:6.0.8 env: - name: ALLOW_EMPTY_PASSWORD value: "yes" resources: requests: cpu: 100m memory: 128Mi limits: cpu: 250m memory: 256Mi ports: - containerPort: 6379 name: redis --- apiVersion: v1 kind: Service metadata: name: azure-vote-back-teleport spec: ports: - port: 6379 selector: app: azure-vote-back-teleport --- apiVersion: apps/v1 kind: Deployment metadata: name: azure-vote-front-teleport spec: replicas: 1 selector: matchLabels: app: azure-vote-front-teleport template: metadata: labels: app: azure-vote-front-teleport spec: nodeSelector: "beta.kubernetes.io/os": linux "acr-teleport": enabled containers: - name: azure-vote-front-teleport image: .azurecr.io/azure-vote-front:v1 resources: requests: cpu: 100m memory: 
128Mi limits: cpu: 250m memory: 256Mi ports: - containerPort: 80 env: - name: REDIS value: "azure-vote-back-teleport" --- apiVersion: v1 kind: Service metadata: name: azure-vote-front-teleport spec: type: LoadBalancer ports: - port: 80 selector: app: azure-vote-front-teleport ================================================ FILE: docs/teleport/teleport-repository-management.md ================================================ > [!WARNING] > This page is no longer being maintained and will be archived by Tuesday, November 11, 2023. Please visit [aka.ms/acr/artifact-streaming](https://aka.ms/acr/artifact-streaming). # Manage Repositories in Teleport Enabled Registries ## Existing Limitations - Registries must first be Teleport enabled to enable repositories - There is a current 10 teleport enabled repository limit for registries ## Existing Flow At the moment if a repository is teleport enabled, this means it can expand images that are pushed into it making them into the teleport format. Note that this does not interfere with regular registry storage and teleportable (expanded) layers are stored in a separate storage than typical layers. Making a repository enabled does not however expand all existing images in it, rather all images pushed after the fact will be expanded. 
This can be best illustrated with examples, take a new empty registry that has already been teleport enabled, in this case we can summarize its state as: Registry A Properties - Teleport enabled -> Repositories (none) Pushing any image to Registry A will result in the creation of a teleport enabled repository, for visualization: Push ubuntu:18.0.1 to Registry A Registry A Properties - Teleport enabled -> Repositories ubuntu (Teleport Enabled) |-> Tag: 18.0.1 Can be pulled from teleport client Now consider a non empty registry, registry B that has just had teleport enabled Registry B Properties - Teleport enabled -> Repositories python (Not Teleport Enabled) |-> Tag: latest Cannot be pulled from teleport client Pushing a new image not already present will result in: Push ubuntu:18.0.1 to Registry B Registry B Properties - Teleport enabled -> Repositories python (Not Teleport Enabled) |-> Tag: latest Cannot be pulled from teleport client ubuntu (Teleport Enabled) |-> Tag: 18.0.1 Can be pulled from teleport client If we want to enable python we can similarly re push the python image resulting in: Push python:latest to Registry B Registry B Properties - Teleport enabled -> Repositories python (Teleport Enabled) |-> Tag: latest Can be pulled from teleport client ubuntu (Teleport Enabled) |-> Tag: 18.0.1 Can be pulled from teleport client A tricky thing however is that if instead of the last step we pushed a different image for repository python (where latest and 2.2 dont share a digest) we would see this state: Push python:2.2 to Registry B Registry B Properties - Teleport enabled -> Repositories python (Teleport Enabled) |-> Tag: latest Cannot be pulled from teleport client Tag: 2.2 Can be pulled from teleport client ubuntu (Teleport Enabled) |-> Tag: 18.0.1 Can be pulled from teleport client ## Manually Select which Repositories Are Teleport Enabled The previous operation does not give customers much control over which registries are teleport enabled as a result we do 
have an existing flow to choose which repositories are enabled and which aren't
This can be used by first setting env variables for credentials: > Note: Assure [edit-teleport-attribute.sh](./edit-teleport-attribute.sh) is set to execute: `sudo chmod +x edit-teleport-attribute.sh` ```bash export ACR_USER=teleport-token export ACR_PWD=$(az acr token create \ --name teleport-token \ --registry $ACR \ --scope-map _repositories_pull \ --query credentials.passwords[0].value -o tsv) edit-teleport-attribute.sh disable --debug ``` ### Manually enable teleport on a repository Once the registry has less than 10 teleportable repositories enabled, the next repository for which an image is pushed that is not already teleport enabled will become teleport enabled. As a result there is no direct need of enabling teleport for it, instead you can just push an image to said repository. Nonetheless for completeness [edit-teleport-attribute.sh](./edit-teleport-attribute.sh) script can set this metadata field to enable teleport on a repository manually (you will still need to push afterwards so the image will expand in the background). ```bash export ACR_USER=teleport-token export ACR_PWD=$(az acr token create \ --name teleport-token \ --registry $ACR \ --scope-map _repositories_pull \ --query credentials.passwords[0].value -o tsv) edit-teleport-attribute.sh enable --debug ``` After, new images pushed to the enabled repository will be expanded and teleportable. ## FAQ - Can the Teleport 10 repository limit be raised? ``` The 10 repository limit is a temporary measure during the private preview phase. We do not currently have a way to raise this value for a particular registry. If necessary users can request a second registry to be teleport enabled ``` - Will pushing one image to an existing repository (making it teleport enabled) expand all existing tags making them teleportable? 
``` Unfortunately we do not currently support backfill so only layers contained in pushes after teleport has been enabled on the registry will be fixed ``` - Is there a timeline to improve this behaviour? ``` We have a plan to improve this behaviour but the exact timeline is not set in stone ``` - How can I tell if an image in a teleport enabled repository is actually teleportable? ``` This can be done using the check-expansion script also provided in this repository ``` ================================================ FILE: notifications/README.md ================================================ # ACR Notifications This page will capture ACR Service notifications. ## Security Notifications | Date | Notification | Status | | - | - | - | | September 26, 2020 | [ACR Helm Repo Security Advisory](./helm-repo-failure-20200918-.md)-fail to push | **Resolution Complete** | ================================================ FILE: notifications/helm-repo-failure-20200918-.md ================================================ # ACR Helm Repo Security Advisory |Date | Status | |-|-| | September 26, 2020| Mitigation Complete | | September 25, 2020| Azure Commercial/Public Complete, China/.gov under way | | September 24, 2020| Active - in-deployment | | September 22, 2020| Active - testing solution | | September 18, 2020 | Identified | ## Rollout Complete - Following [Azure safe deployment practices](https://azure.microsoft.com/en-us/blog/advancing-safe-deployment-practices/), the ACR engineering team has completed deployments to all Azure Commercial/Public, China and US Gov regions. 
## Does this Issue Apply to You - Using `helm repo` features with Azure Container Registry - yes - Using the helm client, version `v3.3.1` ***or lower*** - yes - Using **[helm 3 registry](https://helm.sh/docs/topics/registries/)** features that persist helm charts as OCI Artifacts - **this does NOT apply** to you ## Issue Summary - ACR generates yaml content within the `index.yaml` index to identify where the chart is stored within the content store ```yaml apiVersion: v1 entries: wordpress: - acrMetadata: manifestDigest: sha256:08ef434162070bba4256414c80b001d15b7503ef2a1a4fa1f60bab174f80d4d7 appVersion: 5.1.0 created: "2019-03-06T16:59:25.8892193Z" ``` - To mitigate security concerns, unrelated to how ACR annotates `index.yaml`, the helm client no longer supports additional content within the `index.yaml` index file causing helm v3.3.2 or higher to fail. - ACR is actively rolling out a server side change to generate newly complaint content, expected to be completed by Monday September 28, 2020 ```yaml apiVersion: v1 entries: wordpress: - annotations azurecr.io/manifest-digest: sha256:08ef434162070bba4256414c80b001d15b7503ef2a1a4fa1f60bab174f80d4d7 appVersion: 5.1.0 created: "2019-03-06T16:59:25.8892193Z" ``` ## Azure Container Registry User Guidance Evaluating the information provided to the [Helm security advisories](https://github.com/helm/helm/security/advisories): - [Repository index file allows for duplicates of the same chart entry](https://github.com/helm/helm/security/advisories/GHSA-jm56-5h66-w453) - [Sanitizing plugin names](https://github.com/helm/helm/security/advisories/GHSA-m54r-vrmv-hw33) - [plugin.yaml file allows for duplicate entries](https://github.com/helm/helm/security/advisories/GHSA-c52f-pq47-2r9j) We can provide some guidance to avoid customers being stuck between the proverbial rock and hard place while we rollout a server side change to regenerate the `index.yaml` files. 
First, some scoping to this guidance: - [ACR is a private registry](https://aka.ms/acr), providing [private repository storage of Helm charts](https://aka.ms/acr/helm-repos) - ACR implements [Azure Security Benchmarks](https://docs.microsoft.com/en-us/azure/container-registry/security-baseline) minimizing man in the middle attacks addressed in the helm security issues. - The customer controls access to their resource, and can monitor requests through [ACR Audit Logs](https://aka.ms/acr/audit-logs) - The helm client being used to pull charts from ACR is scoped to only pulling charts from ACR, and not public locations outside the control of the customers environment ## Getting Unblocked with Existing Workflows Based on the above guidance, we recognize that customers can not be blocked for their deployments and we provide the following guidance: - Use a helm client <= `v3.3.1` to access ACR content ONLY - Helm server deployed within kubernetes is unrelated - Do NOT use the same client to pull public charts as the concerns noted in the security advisories may apply - As ACR completes the rollout tracked [here](https://aka.ms/acr/advisories), update to `v3.3.2` or newer tested clients. - Once the rollout is complete, users will need to push any chart to their repository, trigger a regeneration of the `index.yaml` file, conforming to new helm behavior ## Go Forward Plan - [ACR & Helm Roadmap](https://github.com/Azure/acr/blob/main/docs/acr-roadmap.md#acr-helm-ga): Consider moving to [`helm registry`](https://helm.sh/docs/topics/registries/) support, which uses OCI Artifact to persist charts as all other artifacts in a registry. While there are gaps that typically apply to public repositories, the majority of requirements are covered for private deployments. 
- ACR customers will benefit from [Private Link](https://aka.ms/acr/privatelink), [Auto-purge](https://aka.ms/acr/auto-purge) with enhancements coming this fall, on-prem registries and more - The Helm community is [establishing a security notification process](https://github.com/helm/community/issues/128). Please weigh into the discussion with your thoughts and concerns. - Consider importing public content to public registries, where you can security scan, test and verify the components you depend on work in your environment. This includes public charts, images and binaries like the helm client. See guidance on [Consuming Upstream Content in Your Software or Service](https://stevelasker.blog/2020/09/01/consuming-upstream-content/) ## What Is the ACR Bug As noted in [Helm repo add fails with Azure Container Registry #8761](https://github.com/helm/helm/issues/8761), ACR is writing data to the chart for tracking, but it's no longer considered valid. Why you might ask? - good question: ACR `helm repo` was an initial experiment that persists helm charts within the Azure Container Registry infrastructure, providing all the production security, reliability, performance and sovereignty capabilities of ACR, to helm charts. As helm charts are stored within a registry, as a content addressable blob, we needed a means to store the content digest information. The example below shows wordpress being stored with it's digest. ```yaml apiVersion: v1 entries: wordpress: - acrMetadata: manifestDigest: sha256:08ef434162070bba4256414c80b001d15b7503ef2a1a4fa1f60bab174f80d4d7 appVersion: 5.1.0 created: "2019-03-06T16:59:25.8892193Z" ``` The information isn't required by the client, but it's stored in the `index.yaml` as it worked, and it solved the need. Prior to `helm v3.3.1` string elements were supported. The result of a security audit triggered the helm team to implement string yaml validation. 
We fully support the security fix and recognize the gap in security notifications between the helm project and consumers. ### The Fix - The ACR service will change the `index.yaml` formatting of the digest to conform to helm annotations: ```yaml apiVersion: v1 entries: wordpress: - annotations: azurecr.io/manifest-digest: sha256:08ef434162070bba4256414c80b001d15b7503ef2a1a4fa1f60bab174f80d4d7 appVersion: 5.1.0 created: "2019-03-06T16:59:25.8892193Z" ``` - To trigger a server side regeneration of the `index.yaml` a new chart will need to be pushed to acr ```shell helm create test helm package ./test az acr helm push ./test-0.1.0.tgz ``` - Once a push is complete, the `index.yaml` will be updated to conform to the new annotations format - The ACR Helm update must be completed in the region hosting the registry for the new format to be generated - Once complete, all helm clients will continue to function, including new helm clients > `v3.3.1` ## Q&A - **Q: Why was this security fix rolled out before a mitigation was in place?** - **A:** There was no formal process in place for the Helm project to communicate security fixes to its consumers, nor did ACR confirm there was a security notification in process before supporting this capability. This is a good lesson for all consumers of open source projects to evaluate. - **Q: Can ACR update my registry so I don't have to push a chart to get the new format?** - **A:** We evaluated back filling, however this would have taken longer to verify and implied higher risk as the number of registries supporting `helm repo` worldwide is substantial - **Q: Am I secure in using helm < `v3.3.1` in production, even though there are security advisories?** - **A:** Security should always be a concern. If you follow the above guidance for using `helm repo` uniquely with ACR, where you control all the content which was securely pushed by members of your trusted circle, you should be comfortable proceeding. 
The known security issues involve compromised registries, or shared public registries that share a single `index.yaml` - **Q: How can I secure my helm chart usage?** - **A:** Consuming public content is an advantage and a risk. We recommend importing all public content and tooling being used within your organization. Security scan it, test each updated version and automate the process to assure you are always working with recent content, that is both secured and tested for your environment. See guidance on [Consuming Upstream Content in Your Software or Service](https://stevelasker.blog/2020/09/01/consuming-upstream-content/) Please see [Azure Container Registry feedback & support](https://aka.ms/acr/links?#providing-feedback) for additional information. ================================================ FILE: samples/dotnetcore/image-transfer/ContainerRegistryTransfer/Clients/ExportClient.cs ================================================ using ContainerRegistryTransfer.Helpers; using ContainerRegistryTransfer.Models; using Microsoft.Azure.Management.ContainerRegistry; using Microsoft.Azure.Management.ContainerRegistry.Models; using Microsoft.Azure.Management.KeyVault; using Microsoft.Azure.Management.ResourceManager.Fluent; using System; using System.Threading.Tasks; using Task = System.Threading.Tasks.Task; namespace ContainerRegistryTransfer.Clients { /* Creates an ACR export pipeline resource and runs it to export registry artifacts to an Azure Storage blob container. NOTE(review): generic type arguments (e.g. Task<ExportPipeline>) appear to have been stripped by the text extraction; methods shown as bare Task do return values. */ internal class ExportClient { ContainerRegistryManagementClient registryClient; KeyVaultManagementClient keyVaultClient; Options options; /* Stores the management clients and user-supplied options; no validation is performed here. */ public ExportClient(ContainerRegistryManagementClient registryClient, KeyVaultManagementClient keyVaultClient, Options options) { this.registryClient = registryClient; this.keyVaultClient = keyVaultClient; this.options = options; } /* Creates the export pipeline, then grants its managed identity access to the configured key vault so it can read the storage SAS secret. */ public async Task CreateExportPipelineAsync() { var exportPipelineName = options.ExportPipeline.PipelineName; Console.WriteLine($"Creating exportPipeline {exportPipelineName}."); var exportPipeline = await
CreateExportPipelineResourceAsync().ConfigureAwait(false); Console.WriteLine($"Successfully created exportPipeline {exportPipelineName}."); // give the pipeline identity access to the key vault await KeyVaultHelper.AddKeyVaultAccessPolicyAsync( keyVaultClient, exportPipelineName, options.TenantId, options.ExportPipeline.ResourceGroupName, options.ExportPipeline.KeyVaultUri, IdentityHelper.GetManagedIdentityPrincipalId(exportPipeline.Identity)); return exportPipeline; } /* Builds the ExportPipeline resource (AzureStorageBlobContainer target) in the registry's own region and creates it via ARM; throws ArgumentException when the configured registry cannot be found. */ public async Task CreateExportPipelineResourceAsync() { var exportResourceGroupName = options.ExportPipeline.ResourceGroupName; var exportRegistryName = options.ExportPipeline.RegistryName; var registry = await registryClient.Registries.GetAsync( exportResourceGroupName, exportRegistryName).ConfigureAwait(false); if (registry != null) { var exportPipeline = new ExportPipeline( name: options.ExportPipeline.PipelineName, location: registry.Location, identity: IdentityHelper.GetManagedIdentity(options.ExportPipeline.UserAssignedIdentity), target: new ExportPipelineTargetProperties { Type = "AzureStorageBlobContainer", Uri = options.ExportPipeline.ContainerUri, KeyVaultUri = options.ExportPipeline.KeyVaultUri }, options: options.ExportPipeline.Options ); return await registryClient.ExportPipelines.CreateAsync(registryName: options.ExportPipeline.RegistryName, resourceGroupName: options.ExportPipeline.ResourceGroupName, exportPipelineName: options.ExportPipeline.PipelineName, exportPipelineCreateParameters: exportPipeline).ConfigureAwait(false); } else { throw new ArgumentException($"Could not find registry '{exportRegistryName}'.
Please ensure the registry exists in the current resource group {exportResourceGroupName}."); } } /* Starts a PipelineRun exporting the configured artifacts to the blob target and logs the outcome; a "Failed" provisioning state is logged rather than thrown. */ public async Task ExportImagesAsync(ExportPipeline exportPipeline) { var pipelineId = exportPipeline.Id; var pipelineRunName = options.ExportPipelineRun.PipelineRunName; var targetName = options.ExportPipelineRun.TargetName; var artifacts = options.ExportPipelineRun.Artifacts; Console.WriteLine($"Export PipelineRun properties:"); Console.WriteLine($"  registryName: {options.ExportPipeline.RegistryName}"); Console.WriteLine($"  pipelineRunName: {options.ExportPipelineRun.PipelineRunName}"); Console.WriteLine($"  pipelineResourceId: {pipelineId}"); Console.WriteLine($"  targetName: {options.ExportPipelineRun.TargetName}"); Console.WriteLine($"  artifacts: {string.Join(Environment.NewLine, artifacts)}"); Console.WriteLine($"======================================================================"); var pipelineRunRequest = new PipelineRunRequest { PipelineResourceId = pipelineId, Target = new PipelineRunTargetProperties { Type = "AzureStorageBlob", Name = targetName }, Artifacts = artifacts }; Console.WriteLine($"Running pipelineRun {pipelineRunName}..."); var pipelineRun = await registryClient.PipelineRuns.CreateAsync(registryName: options.ExportPipeline.RegistryName, resourceGroupName: options.ExportPipeline.ResourceGroupName, pipelineRunName: pipelineRunName, request: pipelineRunRequest).ConfigureAwait(false); if (string.Equals(pipelineRun.ProvisioningState, "Failed", StringComparison.OrdinalIgnoreCase)) { Console.WriteLine($"PipelineRun {pipelineRunName} failed with the inner error '{pipelineRun.Response.PipelineRunErrorMessage}'."); } else { Console.WriteLine($"PipelineRun {pipelineRunName} completed successfully!"); Console.WriteLine($"Uploaded blob {targetName} to {options.ExportPipeline.ContainerUri}."); } } } } ================================================ FILE: samples/dotnetcore/image-transfer/ContainerRegistryTransfer/Clients/ImportClient.cs
================================================ using ContainerRegistryTransfer.Helpers; using ContainerRegistryTransfer.Models; using Microsoft.Azure.Management.ContainerRegistry; using Microsoft.Azure.Management.ContainerRegistry.Models; using Microsoft.Azure.Management.KeyVault; using System; using System.Threading.Tasks; namespace ContainerRegistryTransfer.Clients { internal class ImportClient { ContainerRegistryManagementClient registryClient; KeyVaultManagementClient keyVaultClient; Options options; public ImportClient(ContainerRegistryManagementClient registryClient, KeyVaultManagementClient keyVaultClient, Options options) { this.registryClient = registryClient; this.keyVaultClient = keyVaultClient; this.options = options; } public async Task CreateImportPipelineAsync() { var importPipelineName = options.ImportPipeline.PipelineName; Console.WriteLine($"Creating importPipeline {importPipelineName}."); var importPipeline = await CreateImportPipelineResourceAsync().ConfigureAwait(false); Console.WriteLine($"Successfully created importPipeline {importPipelineName}."); // give the pipeline identity access to the key vault await KeyVaultHelper.AddKeyVaultAccessPolicyAsync( keyVaultClient, importPipelineName, options.TenantId, options.ImportPipeline.ResourceGroupName, options.ImportPipeline.KeyVaultUri, IdentityHelper.GetManagedIdentityPrincipalId(importPipeline.Identity)); return importPipeline; } public async Task CreateImportPipelineResourceAsync() { var importResourceGroupName = options.ImportPipeline.ResourceGroupName; var importRegistryName = options.ImportPipeline.RegistryName; var registry = await registryClient.Registries.GetAsync( importResourceGroupName, importRegistryName).ConfigureAwait(false); if (registry != null) { var importPipeline = new ImportPipeline( name: options.ImportPipeline.PipelineName, location: registry.Location, identity: IdentityHelper.GetManagedIdentity(options.ImportPipeline.UserAssignedIdentity), source: new 
ImportPipelineSourceProperties { Type = "AzureStorageBlobContainer", Uri = options.ImportPipeline.ContainerUri, KeyVaultUri = options.ImportPipeline.KeyVaultUri }, trigger: new PipelineTriggerProperties { SourceTrigger = new PipelineSourceTriggerProperties { Status = "Enabled" } }, options: options.ImportPipeline.Options ); return await registryClient.ImportPipelines.CreateAsync(registryName: registry.Name, resourceGroupName: options.ImportPipeline.ResourceGroupName, importPipelineName: options.ImportPipeline.PipelineName, importPipelineCreateParameters: importPipeline).ConfigureAwait(false); } else { throw new ArgumentException($"Could not find registry '{importRegistryName}'. Please ensure the registry exists in the current resource group {importResourceGroupName}."); } } } } ================================================ FILE: samples/dotnetcore/image-transfer/ContainerRegistryTransfer/ContainerRegistryTransfer.csproj ================================================ Exe netcoreapp3.1 latest PreserveNewest ================================================ FILE: samples/dotnetcore/image-transfer/ContainerRegistryTransfer/Helpers/AzureHelper.cs ================================================ using Microsoft.Azure.Management.ContainerRegistry; using ContainerRegistryTransfer.Models; using Microsoft.Azure.Management.KeyVault; using Microsoft.Azure.Management.ResourceManager.Fluent; using Microsoft.Azure.Management.ResourceManager.Fluent.Authentication; using System; namespace ContainerRegistryTransfer.Helpers { public static class AzureHelper { public static AzureCredentials GetAzureCredentials(AzureEnvironment environment, string tenantId, string miClientId, string spClientId, string spClientSecret) { if (string.IsNullOrWhiteSpace(tenantId)) { throw new ArgumentNullException(nameof(tenantId)); } if (!string.IsNullOrWhiteSpace(miClientId)) { return new AzureCredentials( new MSILoginInformation(MSIResourceType.VirtualMachine, miClientId), environment, tenantId); } 
else if (!string.IsNullOrWhiteSpace(spClientId) && !string.IsNullOrWhiteSpace(spClientSecret)) { return new AzureCredentials( new ServicePrincipalLoginInformation { ClientId = spClientId, ClientSecret = spClientSecret }, tenantId, environment); } else { throw new ArgumentNullException("No subscription credential."); } } public static ContainerRegistryManagementClient GetContainerRegistryManagementClient(Options options) { var credential = GetAzureCredentials( options.AzureEnvironment, options.TenantId, options.MIClientId, options.SPClientId, options.SPClientSecret); var subscriptionId = options.SubscriptionId; if (string.IsNullOrWhiteSpace(subscriptionId)) { throw new ArgumentNullException(nameof(subscriptionId)); } var registryClient = new ContainerRegistryManagementClient(credential.WithDefaultSubscription(subscriptionId)); registryClient.SubscriptionId = subscriptionId; return registryClient; } public static KeyVaultManagementClient GetKeyVaultManagementClient(Options options) { var credential = GetAzureCredentials( options.AzureEnvironment, options.TenantId, options.MIClientId, options.SPClientId, options.SPClientSecret); var subscriptionId = options.SubscriptionId; if (string.IsNullOrWhiteSpace(subscriptionId)) { throw new ArgumentNullException(nameof(subscriptionId)); } var keyVaultClient = new KeyVaultManagementClient(credential.WithDefaultSubscription(subscriptionId)); keyVaultClient.SubscriptionId = subscriptionId; return keyVaultClient; } } } ================================================ FILE: samples/dotnetcore/image-transfer/ContainerRegistryTransfer/Helpers/IdentityHelper.cs ================================================ using Microsoft.Azure.Management.ContainerRegistry.Models; using System.Collections.Generic; using System.Linq; namespace ContainerRegistryTransfer.Helpers { public static class IdentityHelper { public static IdentityProperties GetManagedIdentity(string userAssignedIdentity) { if (!string.IsNullOrEmpty(userAssignedIdentity)) { 
return new IdentityProperties { Type = ResourceIdentityType.UserAssigned, UserAssignedIdentities = new Dictionary { { userAssignedIdentity, new UserIdentityProperties() } } }; } else { return new IdentityProperties { Type = ResourceIdentityType.SystemAssigned }; } } public static string GetManagedIdentityPrincipalId(IdentityProperties identity) { return identity.PrincipalId ?? identity.UserAssignedIdentities.First().Value.PrincipalId; } } } ================================================ FILE: samples/dotnetcore/image-transfer/ContainerRegistryTransfer/Helpers/KeyVaultHelper.cs ================================================ using Microsoft.Azure.Management.ContainerRegistry; using Microsoft.Azure.Management.KeyVault; using Microsoft.Azure.Management.KeyVault.Models; using System; using System.Collections.Generic; using Task = System.Threading.Tasks.Task; namespace ContainerRegistryTransfer.Helpers { public static class KeyVaultHelper { public static async Task AddKeyVaultAccessPolicyAsync(KeyVaultManagementClient keyVaultClient, string pipelineName, string tenantId, string resourceGroupName, string vaultUri, string identityPrincipalId) { var vaultName = GetKVNameFromUri(vaultUri); Console.WriteLine($"Adding accessPolicy for pipeline '{pipelineName}' to vault '{vaultName}'."); var vault = await keyVaultClient.Vaults.GetAsync(resourceGroupName, vaultName).ConfigureAwait(false); if (vault != null) { var accessPolicy = new AccessPolicyEntry { TenantId = new System.Guid(tenantId), ObjectId = identityPrincipalId, Permissions = new Permissions { Secrets = new List { { "get" } } } }; if (vault.Properties.AccessPolicies.Contains(accessPolicy)) { Console.WriteLine($"The vault '{vaultName}' already contains this access policy for principalId '{identityPrincipalId}'. 
Skip."); } else { Console.WriteLine($"Adding access policy for principalId '{identityPrincipalId} to the vault '{vaultName}'."); await keyVaultClient.Vaults.UpdateAccessPolicyAsync( resourceGroupName, vaultName, AccessPolicyUpdateKind.Add, new VaultAccessPolicyParameters { Properties = new VaultAccessPolicyProperties { AccessPolicies = new List() { accessPolicy } } }).ConfigureAwait(false); } } else { throw new ArgumentException($"Could not find key vault '{vaultName}'. Please ensure the vault exists in the current resource group {resourceGroupName}."); } } private static string GetKVNameFromUri(string keyVaultUri) { var vaultUri = new Uri(keyVaultUri); return vaultUri?.Host?.Split('.')[0]; } } } ================================================ FILE: samples/dotnetcore/image-transfer/ContainerRegistryTransfer/Models/Options.cs ================================================ using Microsoft.Azure.Management.ResourceManager.Fluent; using System; namespace ContainerRegistryTransfer.Models { public class Options { public string Environment { get; set; } public string TenantId { get; set; } public string MIClientId { get; set; } public string SPClientId { get; set; } public string SPClientSecret { get; set; } public string SubscriptionId { get; set; } public PipelineConfig ExportPipeline { get; set; } public PipelineConfig ImportPipeline { get; set; } public PipelineRunConfig ExportPipelineRun { get; set; } public AzureEnvironment AzureEnvironment { get { return string.IsNullOrWhiteSpace(Environment) ? 
AzureEnvironment.AzureGlobalCloud : AzureEnvironment.FromName(Environment); } } public void Validate() { if (string.IsNullOrWhiteSpace(TenantId)) { throw new ArgumentNullException(nameof(TenantId)); } if (string.IsNullOrWhiteSpace(MIClientId) && (string.IsNullOrWhiteSpace(SPClientId) || string.IsNullOrWhiteSpace(SPClientSecret))) { throw new ArgumentNullException($"Missing {nameof(MIClientId)} or {nameof(SPClientId)}/{nameof(SPClientSecret)}"); } if (string.IsNullOrWhiteSpace(SubscriptionId)) { throw new ArgumentNullException(nameof(SubscriptionId)); } ExportPipeline.Validate(); ImportPipeline.Validate(); } } } ================================================ FILE: samples/dotnetcore/image-transfer/ContainerRegistryTransfer/Models/PipelineConfig.cs ================================================ using System; using System.Collections.Generic; namespace ContainerRegistryTransfer.Models { public class PipelineConfig { public string ResourceGroupName { get; set; } public string RegistryName { get; set; } public string PipelineName { get; set; } public string KeyVaultUri { get; set; } public string ContainerUri { get; set; } // Resource ID of the user assigned managed identity. If this property is ommitted, // a system-assigned identity will be provisioned for the pipeline. 
public string UserAssignedIdentity { get; set; } public List Options { get; set; } public void Validate() { if (string.IsNullOrWhiteSpace(ResourceGroupName)) { throw new ArgumentNullException(nameof(ResourceGroupName)); } if (string.IsNullOrWhiteSpace(RegistryName)) { throw new ArgumentNullException(nameof(RegistryName)); } if (string.IsNullOrWhiteSpace(KeyVaultUri)) { throw new ArgumentNullException(nameof(KeyVaultUri)); } if (string.IsNullOrWhiteSpace(ContainerUri)) { throw new ArgumentNullException(nameof(ContainerUri)); } if (string.IsNullOrWhiteSpace(PipelineName)) { throw new ArgumentNullException(nameof(PipelineName)); } } } } ================================================ FILE: samples/dotnetcore/image-transfer/ContainerRegistryTransfer/Models/PipelineRunConfig.cs ================================================ using System; using System.Collections.Generic; namespace ContainerRegistryTransfer.Models { public class PipelineRunConfig { public string PipelineRunName { get; set; } public string TargetName { get; set; } public List Artifacts { get; set; } public void Validate() { if (string.IsNullOrWhiteSpace(PipelineRunName)) { throw new ArgumentNullException(nameof(PipelineRunName)); } if (string.IsNullOrWhiteSpace(TargetName)) { throw new ArgumentNullException(nameof(TargetName)); } if (Artifacts == null || Artifacts.Count == 0) { throw new ArgumentNullException(nameof(Artifacts)); } } } } ================================================ FILE: samples/dotnetcore/image-transfer/ContainerRegistryTransfer/Program.cs ================================================ using ContainerRegistryTransfer.Clients; using ContainerRegistryTransfer.Helpers; using ContainerRegistryTransfer.Models; using Microsoft.Extensions.Configuration; using System; using System.IO; using System.Threading.Tasks; using Task = System.Threading.Tasks.Task; namespace ContainerRegistryTransfer { internal class Program { public static async Task Main(string[] args) { try { string 
appSettingsFile = args.Length > 0 ? args[0] : Path.Combine(Path.GetDirectoryName(typeof(Program).Assembly.Location), "appsettings.json"); var options = LoadOptions(appSettingsFile); options.Validate(); // Use ACR Transfer to move artifacts between two registries await TransferRegistryArtifacts(options).ConfigureAwait(false); return 0; } catch (Exception ex) { Console.WriteLine(); Console.WriteLine($"Failed with the following error:"); Console.WriteLine(ex); return -1; } } private static async Task TransferRegistryArtifacts(Options options) { var exportOptionsDisplay = options.ExportPipeline.Options != null ? string.Join(", ", options.ExportPipeline.Options) : ""; var importOptionsDisplay = options.ImportPipeline.Options != null ? string.Join(", ", options.ImportPipeline.Options) : ""; Console.WriteLine($"Starting ContainerRegistryTransfer..."); Console.WriteLine(); Console.WriteLine($"Azure Environment properties:"); Console.WriteLine($" MIClientId: {options.MIClientId}"); Console.WriteLine($" SPClientId: {options.SPClientId}"); Console.WriteLine($" AzureEnvironment: {options.AzureEnvironment.Name}"); Console.WriteLine($" SubscriptionId: {options.SubscriptionId}"); Console.WriteLine($"======================================================================"); Console.WriteLine($"ExportPipeline properties:"); Console.WriteLine($" ResourceGroupName: {options.ExportPipeline.ResourceGroupName}"); Console.WriteLine($" RegistryName: {options.ExportPipeline.RegistryName}"); Console.WriteLine($" ExportPipelineName: {options.ExportPipeline.PipelineName}"); Console.WriteLine($" UserAssignedIdentity: {options.ExportPipeline.UserAssignedIdentity}"); Console.WriteLine($" StorageUri: {options.ExportPipeline.ContainerUri}"); Console.WriteLine($" KeyVaultSecretUri: {options.ExportPipeline.KeyVaultUri}"); Console.WriteLine($" Options: {exportOptionsDisplay}"); Console.WriteLine($"======================================================================"); 
Console.WriteLine($"ImportPipeline properties:"); Console.WriteLine($" ResourceGroupName: {options.ImportPipeline.ResourceGroupName}"); Console.WriteLine($" RegistryName: {options.ImportPipeline.RegistryName}"); Console.WriteLine($" ImportPipelineName: {options.ImportPipeline.PipelineName}"); Console.WriteLine($" UserAssignedIdentity: {options.ImportPipeline.UserAssignedIdentity}"); Console.WriteLine($" StorageUri: {options.ImportPipeline.ContainerUri}"); Console.WriteLine($" KeyVaultSecretUri: {options.ImportPipeline.KeyVaultUri}"); Console.WriteLine($" Options: {importOptionsDisplay}"); Console.WriteLine($"======================================================================"); Console.WriteLine(); var registryClient = AzureHelper.GetContainerRegistryManagementClient(options); var keyVaultClient = AzureHelper.GetKeyVaultManagementClient(options); var exportClient = new ExportClient(registryClient, keyVaultClient, options); var exportPipeline = await exportClient.CreateExportPipelineAsync().ConfigureAwait(false); var importClient = new ImportClient(registryClient, keyVaultClient, options); var importPipeline = await importClient.CreateImportPipelineAsync().ConfigureAwait(false); Console.WriteLine(); Console.WriteLine($"======================================================================"); Console.WriteLine($"Your importPipeline '{importPipeline.Name}' will run automatically."); Console.WriteLine($"Would you like to run your exportPipeline '{options.ExportPipeline.PipelineName}'? [Y/N]"); var response = Console.ReadLine(); if (string.Equals("Y", response, StringComparison.InvariantCultureIgnoreCase)) { Console.WriteLine("Validating pipelineRun configurations for export."); options.ExportPipelineRun.Validate(); await exportClient.ExportImagesAsync(exportPipeline).ConfigureAwait(false); } Console.WriteLine("ContainerRegistryTransfer completed. 
Goodbye!"); } private static Options LoadOptions(string appSettingsFile) { var builder = new ConfigurationBuilder() .AddJsonFile(appSettingsFile, optional: true) .AddEnvironmentVariables(); var options = new Options(); builder.Build().Bind(options); return options; } } } ================================================ FILE: samples/dotnetcore/image-transfer/ContainerRegistryTransfer/appsettings.json ================================================ { "Environment": "", "TenantId": "", "MIClientId": "", "SPClientId": "", "SPClientSecret": "", "SubscriptionId": "", "ExportPipeline": { "ResourceGroupName": "", "RegistryName": "", "PipelineName": "", "KeyVaultUri": "", "ContainerUri": "", "UserAssignedIdentity": "", "Options": [] }, "ImportPipeline": { "ResourceGroupName": "", "RegistryName": "", "PipelineName": "", "KeyVaultUri": "", "ContainerUri": "", "UserAssignedIdentity": "", "Options": [] }, "ExportPipelineRun": { "PipelineRunName": "", "TargetName": "", "Artifacts": [] } } ================================================ FILE: samples/dotnetcore/image-transfer/ContainerRegistryTransfer.sln ================================================  Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio Version 16 VisualStudioVersion = 16.0.30320.27 MinimumVisualStudioVersion = 10.0.40219.1 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ContainerRegistryTransfer", "ContainerRegistryTransfer\ContainerRegistryTransfer.csproj", "{939E1C02-5F96-4982-B72D-419CE87D4C8F}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU Release|Any CPU = Release|Any CPU EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {939E1C02-5F96-4982-B72D-419CE87D4C8F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {939E1C02-5F96-4982-B72D-419CE87D4C8F}.Debug|Any CPU.Build.0 = Debug|Any CPU {939E1C02-5F96-4982-B72D-419CE87D4C8F}.Release|Any CPU.ActiveCfg = Release|Any CPU 
{939E1C02-5F96-4982-B72D-419CE87D4C8F}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {4D5F1172-6ED2-46DB-BF72-0B09F70A42A9} EndGlobalSection EndGlobal ================================================ FILE: samples/dotnetcore/image-transfer/README.md ================================================ ## Getting Started with Azure Container Registry Transfer - in DotNetCore ## This sample will allow you to transfer artifacts between two registries through a storage account. You will * Create an exportPipeline in your source registry. * Create an importPipeline in your target registry. * Create key vault access policies for both pipeline identities. * (Optionally) Run the exportPipeline to upload the artifacts to storage. Please see the [public documentation](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-transfer-images) for more details on ACR Transfer. #Prerequisities * **Container registries**: This sample uses an existing source ACR with artifacts to transfer and a target registry. ACR Transfer is available in the **Premium** container registry service tier only. * **Storage accounts**: Create source and target storage accounts that ACR Transfer will use to upload and download registry artifacts. Create a blob container for artifact transfer in each account. * **Key Vaults**: Key Vaults are used to store SAS token secrets for export and import. Create the source and target key vaults in the same resource group as your source and target registries. Follow the instructions [here](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-transfer-images#create-and-store-sas-keys) for instructions on how to create SAS tokens for export and import. 
Upload the export SAS token as a secret in your source key vault and the import SAS token as a secret in the target key vault. * User Assigned Identities (optional): You may choose to create user assigned managed identity resources to assign to each pipeline [see tutorial](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-manage-ua-identity-cli). Otherwise, ACR will automatically create a system assigned identity for each of the exportPipeline and importPipeline resources. Read more about the difference between user assigned and system assigned identities [here](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview). Note: You do not need to perform RBAC on these managed identities resources. The sample will add an access policy for each pipeline managed idenity so that it can fetch secrets from the necessary key vault. Note: The sample assumes the above resources are all in the same subscription. ## Running this sample ## * Create a service principal and assign it the contributor role of the source and target resource groups. ``` az ad sp create-for-rbac -n "MyApp" --sdk-auth --role contributor \ --scopes /subscriptions/{SubID}/resourceGroups/{SourceResourceGroup} \ /subscriptions/{SubID}/resourceGroups/{TargetResourceGroup} ``` Make note of `clientId`, `clientSecret`, `subscriptionId`, and `tenantId` to add to appsettings.json. ``` "TenantId": "", "SPClientId": "", "SPClientSecret": "", "SubscriptionId": "", ``` Update the ExportPipeline and ImportPipeline sections of appsettings.json with your source and target configs respectively: ``` "ResourceGroupName": "", "RegistryName": "", "PipelineName": "", "KeyVaultUri": "", "ContainerUri": "", "UserAssignedIdentity": "", "Options": [] ``` Where `KeyVaultUri` is the key vault SAS secret uri and `ContainerUri` is the storage container uri. 
`PipelineName` is the name you choose for the exportPipeline or importPipeline resources. For more info on `Options` please refer to [Export options](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-transfer-images#export-options) and [Import options](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-transfer-images#import-options). * If you intend to run the exportPipeline created in this sample, please fill out the following section of appsettings.json: ``` "ExportPipelineRun": { "PipelineRunName": "", "TargetName": "", "Artifacts": [] } ``` Where `TargetName` is the name you choose for the artifacts blob exported to your source storage account, such as 'myblob'. `PipelineRunName` is the name you choose for the pipelineRun resource. And `Artifacts` is the list of up to 50 artifacts that you would like to transfer from your source registry. Example: `[samples/hello-world:v1", "samples/nginx:v1" , "myrepository@sha256:0a2e01852872..."]` * Build ContainerRegistryTransfer.csproj (DotNetCore SDK 3.1 required) ``` dotnet build ContainerRegistryTransfer/ContainerRegistryTransfer.csproj dotnet ContainerRegistryTransfer/bin/Debug/netcoreapp3.1/ContainerRegistryTransfer.dll ``` ## More information ## [https://github.com/Azure/azure-sdk-for-net](https://github.com/Azure/azure-sdk-for-net) If you don't have a Microsoft Azure subscription you can get a FREE trial account [here](http://go.microsoft.com/fwlink/?LinkId=330212). ================================================ FILE: samples/dotnetcore/registry-artifact-transfer/README.md ================================================ # **Overview of Registry Artifact Transfer Tool** ## The Registry Artifact Transfer Tool supports transfer workflows that combines both registry artifact import and export. 
For example, in a single run, it can (1) import artifacts from a source registry as well as storage blobs previously exported by ACR transfer into a target ACR; (2) batch export the artifacts imported in (1) along with any additional artifacts existing in the target ACR into storage blobs; (3) copy the blobs exported in (2) to a destination blob container so that they are ready to be imported to a different ACR. Please see the public documentation for more details on ACR [Transfer](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-transfer-images) and [Image Import](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-import-images). ### **Features** 1. Import a. Import registry artifacts from a source registry to a target ACR. b. Import registry artifacts from storage blobs exported by ACR transfer to a target ACR. 2. Export a. Export registry artifacts from a target ACR into storage blobs. b. Optionally, copy the exported storage blobs to another blob container. The copy destination blob container can be hooked up with a different ACR with an import pipeline to effectively transfer the artifacts to the latter, with storage blobs as transfer mediums. # **Prerequisities** 0. **Common** * An existing target ACR. The ACR must be in [Premium](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-skus) SKU if feature 1b, 2a, or 2b will be used. * A service principle with contributor role of the target ACR. 1. **Import** a. Import registry artifacts from a source registry to a target ACR. * **Source container registry**: an existing source container registry with artifacts to transfer. To permit image import from the source registry: * If the source registry is an ACR, either the service principle in the common prerequisities has read permission (e.g. Reader role) to the source ACR or the source ACR's user name and password are available. 
The user name can be the admin user of the source ACR or the client ID of a service principle with read permission (and client secret as the password). More details on the permission requirement can be found in [this](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-import-images#import-from-another-azure-container-registry) section. * If the source registry is a non-Azure private registry, credentials that enable pull access to the registry need to be available. b. Import registry artifacts from storage blobs exported by ACR transfer to a target ACR. * **Source storage blobs**: existing source storage blobs in storage containers exported by ACR [export pipelines](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-transfer-images#create-exportpipeline-with-resource-manager) where the source registry artifacts are contained. * **Import pipeline**: an existing [import pipeline](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-transfer-images#create-importpipeline-with-resource-manager) under the target ACR with valid permission (managed identity and sas token in a key vault) to access the storage blob container where the source blobs are located. 2. **Export** a. Export registry artifacts from a target ACR to storage blobs. * **Registry artifacts**: existing registry artifacts to be exported from the target ACR. * **Export pipeline**: an existing [export pipeline](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-transfer-images#create-exportpipeline-with-resource-manager) under the target ACR with valid permission (managed identity and sas token in a key vault) to access the storage blob container where the target blobs will be exported. b. Copy the exported storage blobs to another blob container. * **Copy destination blob container**: an existing storage blob container where the exported blobs will be copied to. 
* **SAS tokens**: A SAS token with read access to the export blob container and a SAS token with write access to the copy destination blob container. # **Configurations** ## The tool uses a single JSON formatted configuration file. The default configuration file name is `transferdefinition.json` and a sample can be found in `src/transferdefinition.json`. ### **Common configurations** The common configurations consist of the information about the Azure environment, the target ACR, and the service principle used to access the target ACR. ``` "AzureEnvironment": { "Name": "AzureGlobalCloud" }, "Registry": { "TenantId": "myTenantId", "SubscriptionId": "mySubscriptionId", "ResourceGroupName": "myResourceGroupName", "Name": "myRegistryName" }, "Identity": { "ClientId": "myClientId", "ClientSecret": "myClientSecret" }, ``` | Configuration | Description | |-----------------------------|---------------------------------------------------------------------------| | AzureEnvironment/Name | The name of the Azure environment. The list of the supported names can be found in [this](https://docs.microsoft.com/en-us/dotnet/api/microsoft.azure.management.resourcemanager.fluent.azureenvironment?view=azure-dotnet) document. | | Registry/TenantId | The tenant ID of the target ACR. | | Registry/SubscriptionId | The subscription ID of the target ACR. | | Registry/ResourceGroupName | The resource group name of the target ACR. | | Registry/Name | The name of the target ACR. | | Identity/ClientId | The client ID of the service principle used to access the target ACR. | | Identity/ClientSecret | The client secret of the service principle. | ### **Import configurations** The import configurations specify: * The repositories and tags from a source registry to be imported to the target ACR. * The storage blobs to be imported to the target ACR as well as the import pipeline to be used for the import. 
``` "Import": { "Enabled": true, "Force": true, "SourceRegistry": { "ResourceId": "mySourceRegistryResourceId", "RegistryUri": "mysourceregistry.azurecr.io", "UserName": "clientId-or-adminUser", "Password": "clientSecret-or-adminPassword" }, "Repositories": [ "sourceRepo", "sourceRepoPrefix*" ], "Tags": [ "sourceRepo1:tag1", "sourceRepo2:tag2" ], "ImportPipelineName": "myImportPipelineName", "Blobs": [ "exportedBlob1", "exportedBlob2", "exportedBlob3" ] }, ``` | Configuration | Features | Description | |-----------------------------------|-----------|--------------------------------------------------------------------------| | Import/Enabled | 1a, 1b | Configures Whether the import feature is enabled. The default value is `false`. | | Import/Force | 1a | Configures whether any existing target tags will be overwritten (see [ImportMode](https://docs.microsoft.com/en-us/rest/api/containerregistry/registries/importimage#importmode)). The default value is `true`.| | Import/SourceRegistry/ResourceId | 1a | The Azure resource ID of the source registry if it is an ACR. If `ResourceId` is specified, `RegistryUri`, `UserName`, and `Password` must be skipped, and vice versa. The `Identity/ClientId` will be used to access the source ACR. | | Import/SourceRegistry/RegistryUri | 1a | The login server of the source registry, applicable to both ACRs and non-Azure public and private registries. If the source registry is a public registry, `UserName` and `Password` must be skipped. | | Import/SourceRegistry/UserName | 1a | The user name to access the source registry. | | Import/SourceRegistry/Password | 1a | The password of the user name. | | Import/Repositories | 1a | The list of repository names and name prefixes to match the repositories in the source registry. All tags in the matched repositories are imported. A repository name prefix can be specified with a `*` after the prefix string | | Import/Tags | 1a | The list of tags in the source registry to import. 
| | Import/ImportPipelineName | 1b | The import pipeline used to import source blobs to the target ACR. | | Import/Blobs | 1b | The source blobs containing the source registry artifacts, previously exported by ACR export pipelines. All blobs must be in the blob container targeted by the specified import pipeline. | ### **Export configurations** The export configurations specify: * The export controls such as batch artifact count and blob name prefix. * The repositories and tags from the target ACR to be exported to storage blobs. * The blob copy SAS token and URI for the export target blob container and the copy destination blob container, respectively. ``` "Export": { "Enabled": true, "ExportPipelineName": "myExportPipelineName", "MaxArtifactCountPerBlob": 50, "BlobNamePrefix": "artifacts", "IncludeImportedArtifacts": true, "Repositories": [ "targetRepo", "targetRepoPrefix*" ], "Tags": [ "targetRepo1:tag1", "targetRepo2:tag2" ], "CopyBlobs": { "Enabled": true, "SourceSasToken": "sourceBlobContainerSasToken", "DestContainerSasUri": "destinationBlobContainerSasUri" } } ``` | Configuration | Features | Description | |---------------------------------------|-----------|--------------------------------------------------------------------------- | | Export/Enabled | 2a | Configures whether the export feature is enabled. The default value is `false`. | | Export/ExportPipelineName | 2a | The export pipeline under the target ACR to export the specified artifacts. | | Export/MaxArtifactCountPerBlob | 2a | The maximum batch export artifact count. The default value is `50`. | | Export/BlobNamePrefix | 2a | The name prefix of the export storage blobs. | | Export/IncludeImportedArtifacts | 2a | Configures whether the artifacts imported by 1a and 1b are included in the export. The default value is `false`. | | Export/Repositories | 2a | The list of repository names and name prefixes to match the repositories in the target ACR. All tags in the matched repositories are exported. 
A repository name prefix can be specified with a `*` after the prefix string. | | Export/Tags | 2a | The list of tags in the target ACR to export. | | Export/CopyBlobs/Enabled | 2b | Configures whether the blob copy feature is enabled. | | Export/CopyBlobs/SourceSasToken | 2b | The SAS token of the blob container targed by the specified export pipeline.| | Export/CopyBlobs/DestContainerSasUri | 2b | The SAS URI of the copy destination blob container. | # **Running the tool** ## * Build the project (.NET Core SDK 3.1 required) ``` dotnet build src/RegistryArtifactTransfer.csproj ``` * Run the tool with the default configuration file transferdefinition.json ``` cd src/bin/Debug/netcoreapp3.1 dotnet RegistryArtifactTransfer.dll ``` * Run the tool with a custom configuration file name Copy the configuration file to src/bin/Debug/netcoreapp3.1 and run ``` cd src/bin/Debug/netcoreapp3.1 dotnet RegistryArtifactTransfer.dll myTransferDefinition.json ``` ================================================ FILE: samples/dotnetcore/registry-artifact-transfer/src/Configurations/AzureEnvironmentConfiguration.cs ================================================ using System; namespace RegistryArtifactTransfer { public class AzureEnvironmentConfiguration { public string Name { get; set; } public string AuthenticationEndpoint { get; set; } public string ManagementEndpoint { get; set; } public string ResourceManagerEndpoint { get; set; } public string GraphEndpoint { get; set; } public string KeyVaultSuffix { get; set; } public string StorageEndpointSuffix { get; set; } public void Validate() { if (string.IsNullOrWhiteSpace(Name)) { throw new ArgumentNullException(nameof(Name)); } } } } ================================================ FILE: samples/dotnetcore/registry-artifact-transfer/src/Configurations/ExportConfiguration.cs ================================================ using System; using System.Collections.Generic; namespace RegistryArtifactTransfer { public class 
ExportConfiguration { public bool Enabled { get; set; } public bool IncludeImportedArtifacts { get; set; } public string ExportPipelineName { get; set; } public string BlobNamePrefix { get; set; } public int MaxConcurrency { get; set; } = 50; public int MaxArtifactCountPerBlob { get; set; } = 50; public int TransferTimeoutInSeconds { get; set; } = 300; public List Repositories { get; set; } = new List(); public List Tags { get; set; } = new List(); public CopyBlobsConfiguration CopyBlobs { get; set; } public void Validate() { if (!Enabled) { return; } if (MaxConcurrency < 1) { throw new ArgumentOutOfRangeException(nameof(MaxConcurrency), MaxConcurrency, "must be larger than 0"); } if (MaxArtifactCountPerBlob < 1) { throw new ArgumentOutOfRangeException(nameof(MaxArtifactCountPerBlob), MaxArtifactCountPerBlob, "must be larger than 0"); } if (TransferTimeoutInSeconds < 1) { throw new ArgumentOutOfRangeException(nameof(TransferTimeoutInSeconds), TransferTimeoutInSeconds, "must be larger than 0"); } if (string.IsNullOrWhiteSpace(ExportPipelineName)) { throw new ArgumentNullException(nameof(ExportPipelineName)); } if (string.IsNullOrWhiteSpace(BlobNamePrefix)) { throw new ArgumentNullException(nameof(BlobNamePrefix)); } CopyBlobs.Validate(); } } public class CopyBlobsConfiguration { public bool Enabled { get; set; } public string DestContainerSasUri { get; set; } public string SourceSasToken { get; set; } public void Validate() { if (!Enabled) { return; } if (string.IsNullOrWhiteSpace(DestContainerSasUri)) { throw new ArgumentNullException(nameof(DestContainerSasUri)); } if (string.IsNullOrWhiteSpace(SourceSasToken)) { throw new ArgumentNullException(nameof(SourceSasToken)); } } } } ================================================ FILE: samples/dotnetcore/registry-artifact-transfer/src/Configurations/IdentityConfiguration.cs ================================================ using System; namespace RegistryArtifactTransfer { public class IdentityConfiguration { public 
string ClientId { get; set; } public string ClientSecret { get; set; } public void Validate() { if (string.IsNullOrWhiteSpace(ClientId)) { throw new ArgumentNullException(nameof(ClientId)); } if (string.IsNullOrWhiteSpace(ClientSecret)) { throw new ArgumentNullException(nameof(ClientSecret)); } } } } ================================================ FILE: samples/dotnetcore/registry-artifact-transfer/src/Configurations/ImportConfiguration.cs ================================================ using System; using System.Collections.Generic; namespace RegistryArtifactTransfer { public class ImportConfiguration { public bool Enabled { get; set; } public bool Force { get; set; } = true; public int MaxConcurrency { get; set; } = 50; public int TransferTimeoutInSeconds { get; set; } = 300; public SourceRegistryConfiguration SourceRegistry { get; set; } public List Repositories { get; set; } = new List(); public List Tags { get; set; } = new List(); public string ImportPipelineName { get; set; } public List Blobs { get; set; } public void Validate() { if (!Enabled) { return; } if (MaxConcurrency < 1) { throw new ArgumentOutOfRangeException(nameof(MaxConcurrency), MaxConcurrency, "must be larger than 0"); } if (TransferTimeoutInSeconds < 1) { throw new ArgumentOutOfRangeException(nameof(TransferTimeoutInSeconds), TransferTimeoutInSeconds, "must be larger than 0"); } if (SourceRegistry == null) { throw new ArgumentNullException(nameof(SourceRegistry)); } SourceRegistry.Validate(); if (string.IsNullOrWhiteSpace(SourceRegistry.RegistryUri) && (Repositories != null && Repositories.Count > 0)) { throw new ArgumentException($"{nameof(SourceRegistry.RegistryUri)} must be specified to import {nameof(Repositories)}"); } if (string.IsNullOrWhiteSpace(ImportPipelineName) && Blobs != null && Blobs.Count > 0) { throw new ArgumentNullException(nameof(ImportPipelineName)); } } } } ================================================ FILE: 
samples/dotnetcore/registry-artifact-transfer/src/Configurations/RegistryConfiguration.cs ================================================
using System;

namespace RegistryArtifactTransfer
{
    /// <summary>
    /// Identifies the target Azure Container Registry by tenant, subscription,
    /// resource group, and registry name. Bound from the "Registry" section of
    /// the transfer definition JSON file.
    /// </summary>
    public class RegistryConfiguration
    {
        public string TenantId { get; set; }

        public string SubscriptionId { get; set; }

        public string ResourceGroupName { get; set; }

        // Registry name only (not the full login-server URI).
        public string Name { get; set; }

        /// <summary>
        /// Ensures all four values are present; each is required to locate the
        /// target registry. Throws <see cref="ArgumentNullException"/> for the
        /// first null/empty/whitespace value found.
        /// </summary>
        public void Validate()
        {
            if (string.IsNullOrWhiteSpace(TenantId)) { throw new ArgumentNullException(nameof(TenantId)); }
            if (string.IsNullOrWhiteSpace(SubscriptionId)) { throw new ArgumentNullException(nameof(SubscriptionId)); }
            if (string.IsNullOrWhiteSpace(ResourceGroupName)) { throw new ArgumentNullException(nameof(ResourceGroupName)); }
            if (string.IsNullOrWhiteSpace(Name)) { throw new ArgumentNullException(nameof(Name)); }
        }
    }
}
================================================ FILE: samples/dotnetcore/registry-artifact-transfer/src/Configurations/SourceRegistryConfiguration.cs ================================================
using System;

namespace RegistryArtifactTransfer
{
    /// <summary>
    /// Describes the source registry for artifact import (the
    /// "Import/SourceRegistry" section of the transfer definition file).
    /// Either <see cref="ResourceId"/> (for a source ACR) or
    /// <see cref="RegistryUri"/> (optionally with credentials) is provided —
    /// never both.
    /// </summary>
    public class SourceRegistryConfiguration
    {
        // Azure resource ID of the source ACR; mutually exclusive with the
        // RegistryUri/UserName/Password trio.
        public string ResourceId { get; set; }

        // Login server of the source registry (ACR or non-Azure registry).
        public string RegistryUri { get; set; }

        public string UserName { get; set; }

        public string Password { get; set; }

        /// <summary>
        /// Validates the mutual-exclusion rules:
        /// ResourceId excludes RegistryUri/UserName/Password; otherwise
        /// RegistryUri is required; and UserName/Password must be given
        /// together or not at all (checked via XOR on their emptiness).
        /// Throws <see cref="ArgumentException"/> on violation.
        /// </summary>
        public void Validate()
        {
            if (!string.IsNullOrWhiteSpace(ResourceId))
            {
                if (!string.IsNullOrWhiteSpace(RegistryUri) || !string.IsNullOrWhiteSpace(UserName) || !string.IsNullOrWhiteSpace(Password))
                {
                    throw new ArgumentException($"{nameof(RegistryUri)}, {nameof(UserName)}, or {nameof(Password)} cannot be used with {nameof(ResourceId)}.");
                }
            }
            else if (string.IsNullOrWhiteSpace(RegistryUri))
            {
                throw new ArgumentException($"Either {nameof(RegistryUri)} or {nameof(ResourceId)} must be specified.");
            }
            // ^ is true exactly when one of the pair is set and the other is not.
            else if (string.IsNullOrWhiteSpace(UserName) ^ string.IsNullOrWhiteSpace(Password))
            {
                throw new ArgumentException($"Either {nameof(UserName)} or {nameof(Password)} is missing.");
            }
        }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/Configurations/TransferDefinition.cs
================================================
namespace RegistryArtifactTransfer
{
    /// <summary>
    /// Root object bound from transferdefinition.json: Azure environment, identity,
    /// target registry, and the import/export work definitions.
    /// </summary>
    public class TransferDefinition
    {
        public AzureEnvironmentConfiguration AzureEnvironment { get; set; }
        public IdentityConfiguration Identity { get; set; }
        public RegistryConfiguration Registry { get; set; }
        public ImportConfiguration Import { get; set; }
        public ExportConfiguration Export { get; set; }

        /// <summary>
        /// Validates every section. Throws a descriptive exception when a whole
        /// section is missing from the configuration file (previously this
        /// surfaced as an opaque NullReferenceException).
        /// </summary>
        public void Validate()
        {
            if (AzureEnvironment == null)
            {
                throw new System.ArgumentNullException(nameof(AzureEnvironment));
            }
            if (Identity == null)
            {
                throw new System.ArgumentNullException(nameof(Identity));
            }
            if (Registry == null)
            {
                throw new System.ArgumentNullException(nameof(Registry));
            }
            if (Import == null)
            {
                throw new System.ArgumentNullException(nameof(Import));
            }
            if (Export == null)
            {
                throw new System.ArgumentNullException(nameof(Export));
            }

            AzureEnvironment.Validate();
            Identity.Validate();
            Registry.Validate();
            Import.Validate();
            Export.Validate();
        }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/Program.cs
================================================
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Newtonsoft.Json;
using Serilog;
using System;
using System.IO;
using System.Net;
using System.Threading.Tasks;

namespace RegistryArtifactTransfer
{
    /// <summary>
    /// Entry point: loads the transfer definition, runs the import worker then the
    /// export worker, and writes a timestamped JSON report plus a log file.
    /// </summary>
    public static class Program
    {
        private const string ReportFileNamePrefix = "report";
        private const string LogFileNamePrefix = "log";

        public static async Task Main(string[] args)
        {
            #region Global HTTP settings
            // https://github.com/Azure/azure-storage-net-data-movement#best-practice
            ServicePointManager.DefaultConnectionLimit = Environment.ProcessorCount * 8;
            ServicePointManager.Expect100Continue = false;
            #endregion

            #region Logger
            // Logs go to both a timestamped file and the console.
            var logFileName = LogFileNamePrefix + "_" + string.Format("{0:yyyy-MM-dd_HH-mm-ss-fff}", DateTimeOffset.Now) + ".txt";
            var loggerFactory = LoggerFactory.Create(builder =>
                builder.AddSerilog(new LoggerConfiguration()
                    .WriteTo.File(logFileName)
                    .WriteTo.ColoredConsole()
                    .CreateLogger()));
            var logger = loggerFactory.CreateLogger("RegistryArtifactTransfer");
            #endregion

            #region Configurations
            // Definition file: first CLI argument, or transferdefinition.json next to the binary.
            string transferDefinitionFile = args.Length > 0
                ? args[0]
                : Path.Combine(Path.GetDirectoryName(typeof(Program).Assembly.Location), "transferdefinition.json");
            var transferDefinition = GetConfig(transferDefinitionFile);
            transferDefinition.Validate();
            var azureEnvironmentConfiguration = transferDefinition.AzureEnvironment;
            var identityConfiguration = transferDefinition.Identity;
            var registryConfiguration = transferDefinition.Registry;
            #endregion

            #region TransferClient
            var transferClient = new TransferClient(
                azureEnvironmentConfiguration,
                identityConfiguration,
                registryConfiguration);
            #endregion

            #region Process transfer
            var report = new TransferReport();
            var importWorker = new ImportWorker(
                transferDefinition.Import,
                registryConfiguration,
                transferClient,
                loggerFactory.CreateLogger<ImportWorker>());
            var exportWorker = new ExportWorker(
                transferDefinition.Export,
                registryConfiguration,
                identityConfiguration,
                transferClient,
                loggerFactory.CreateLogger<ExportWorker>());
            // Imports run first so that exports can optionally include the imported artifacts.
            await importWorker.RunAsync(report);
            await exportWorker.RunAsync(report);
            #endregion

            #region Report results
            logger.LogInformation($"Total artifacts successfully imported: {report.ImportArtifacts.Succeeded.Count}.");
            logger.LogInformation($"Total artifacts failed to import: {report.ImportArtifacts.Failed.Count}.");
            // BUGFIX: these two lines previously said "exported" while reporting the
            // import-blob counters — a copy-paste error in the message text.
            logger.LogInformation($"Total blobs successfully imported: {report.ImportBlobs.Succeeded.Count}.");
            logger.LogInformation($"Total blobs failed to import: {report.ImportBlobs.Failed.Count}.");
            logger.LogInformation($"Total artifacts successfully exported: {report.ExportArtifacts.Succeeded.Count}.");
            logger.LogInformation($"Total artifacts failed in exporting: {report.ExportArtifacts.Failed.Count}.");
            logger.LogInformation($"Total blobs successfully exported: {report.ExportBlobs.Succeeded.Count}.");
            logger.LogInformation($"Total blobs failed in exporting: {report.ExportBlobs.Failed.Count}.");
            logger.LogInformation($"Total blobs successfully copied: {report.CopyBlobs.Succeeded.Count}.");
            logger.LogInformation($"Total blobs failed in copying: {report.CopyBlobs.Failed.Count}.");
            var reportFileName = ReportFileNamePrefix + "_" + string.Format("{0:yyyy-MM-dd_HH-mm-ss-fff}", DateTimeOffset.Now) + ".json";
            await File.WriteAllTextAsync(reportFileName, JsonConvert.SerializeObject(report, Formatting.Indented));
            #endregion

            logger.LogInformation("Done!");
        }

        /// <summary>
        /// Binds the JSON definition file (overridable by environment variables)
        /// onto a <see cref="TransferDefinition"/>.
        /// </summary>
        private static TransferDefinition GetConfig(string transferDefinitionFile)
        {
            var builder = new ConfigurationBuilder()
                .AddJsonFile(transferDefinitionFile)
                .AddEnvironmentVariables();
            var transferDefinition = new TransferDefinition();
            builder.Build().Bind(transferDefinition);
            return transferDefinition;
        }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/Registry.cs
================================================
using System;
using static RegistryArtifactTransfer.ResourceId;

namespace RegistryArtifactTransfer
{
    /// <summary>
    /// A registry reference in one of two mutually exclusive forms:
    /// ARM identity (tenant + ResourceId) or network identity (login server + optional credentials).
    /// </summary>
    public class Registry
    {
        public ResourceId ResourceId { get; }
        public string TenantId { get; }
        public string LoginServer { get; }
        public string UserName { get; }
        public string Password { get; }

        // ARM form: identified by subscription/resource-group/name within a tenant.
        public Registry(
            string tenantId,
            string subscriptionId,
            string resourceGroupName,
            string registryName)
        {
            ResourceId = new ResourceId(subscriptionId, resourceGroupName, registryName, RegistriesARMResourceType);
            TenantId = tenantId;
        }

        // Network form: identified by login server, with optional basic-auth credentials.
        public Registry(
            string loginServer,
            string userName,
            string password)
        {
            LoginServer = loginServer;
            UserName = userName;
            Password = password;
        }

        /// <summary>
        /// Throws <see cref="ArgumentException"/> when neither form is populated,
        /// or when only one half of the credential pair is present.
        /// </summary>
        public void Validate()
        {
            if (ResourceId == null && string.IsNullOrEmpty(LoginServer))
            {
                throw new ArgumentException($"Neither {nameof(ResourceId)} nor {nameof(LoginServer)} is specified.");
            }
            if (string.IsNullOrEmpty(UserName) ^ string.IsNullOrEmpty(Password))
            {
                throw new ArgumentException($"{nameof(UserName)} and {nameof(Password)} should either be both specified or undeclared.");
            }
        }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/RegistryArtifactTransfer.csproj
================================================
Exe netcoreapp3.1 PreserveNewest
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/RepositoryProvider/CatalogApiResponse.cs
================================================
using System.Collections.Generic;
using Newtonsoft.Json;

namespace RegistryArtifactTransfer
{
    /// <summary>Payload of the Docker Registry HTTP API v2 /v2/_catalog response.</summary>
    public class CatalogApiResponse
    {
        [JsonProperty(PropertyName = "repositories")]
        public List<string> Repositories;
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/RepositoryProvider/HttpMessageExtensions.cs
================================================
using System;
using System.Linq;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;

namespace RegistryArtifactTransfer
{
    public static class HttpMessageExtensions
    {
        private const string LinkHeaderName = "Link";
        private const string NextRelationType = "rel=\"next\"";

        /// <summary>
        /// Extracts the next-page URI from an RFC 8288 "Link" response header
        /// (the &lt;uri&gt;; rel="next" form used by registry catalog/tag paging).
        /// Returns null when there is no next page, or when the target is not an
        /// http(s) absolute URI or a same-host relative path.
        /// </summary>
        public static Uri GetNextPageUri(this HttpResponseMessage response)
        {
            if (!response.Headers.Contains(LinkHeaderName))
            {
                return null;
            }

            var headerLink = response.Headers.GetValues(LinkHeaderName);
            var nextPage = headerLink.FirstOrDefault();
            if (!nextPage.Contains(NextRelationType))
            {
                return null;
            }

            // Scan backwards from rel="next" to the nearest <...> pair.
            int backwardsPointer = nextPage.IndexOf(NextRelationType);
            int uriStart = nextPage.LastIndexOf('<', backwardsPointer) + 1;
            int uriEnd = nextPage.LastIndexOf('>', backwardsPointer);
            // BUGFIX: the length passed to Substring was previously "uriEnd - 1",
            // which is only correct when '<' sits at index 0. Compute the length
            // relative to the start index instead.
            nextPage = nextPage.Substring(uriStart, uriEnd - uriStart);

            if (nextPage.StartsWith('/'))
            {
                nextPage = nextPage.Substring(1);
                if (nextPage.StartsWith('/'))
                {
                    // Protocol-relative "//host/..." targets are rejected.
                    return null;
                }
            }

            if (Uri.TryCreate(nextPage, UriKind.Absolute, out Uri nextPageUri))
            {
                // Only follow http/https absolute links.
                if (!string.Equals(nextPageUri.Scheme, Uri.UriSchemeHttp, StringComparison.OrdinalIgnoreCase) &&
                    !string.Equals(nextPageUri.Scheme, Uri.UriSchemeHttps, StringComparison.OrdinalIgnoreCase))
                {
                    return null;
                }
                return nextPageUri;
            }

            if (Uri.IsWellFormedUriString(nextPage, UriKind.Relative))
            {
                // Resolve a relative link against the scheme/host/port of the request.
                var requestUri = response.RequestMessage.RequestUri;
                var baseUriBuilder = new UriBuilder(requestUri.Scheme, requestUri.Host, requestUri.Port);
                return new Uri(baseUriBuilder.Uri, nextPage);
            }

            return null;
        }

        /// <summary>
        /// Adds a Basic Authorization header when both halves of the credential pair are present.
        /// </summary>
        public static void AddBasicAuth(this HttpRequestMessage request, string userName, string password)
        {
            if (!string.IsNullOrWhiteSpace(userName) && !string.IsNullOrWhiteSpace(password))
            {
                var svcCredentials = Convert.ToBase64String(Encoding.UTF8.GetBytes(userName + ":" + password));
                request.Headers.Authorization = new AuthenticationHeaderValue("Basic", svcCredentials);
            }
        }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/RepositoryProvider/RepositoryProviderV2.cs
================================================
using Newtonsoft.Json;
using Polly;
using Polly.Extensions.Http;
using System;
using System.Collections.Generic;
using System.Net.Http;
using System.Threading.Tasks;

namespace RegistryArtifactTransfer
{
    /// <summary>
    /// Lists repositories and tags via the Docker Registry HTTP API v2,
    /// following Link-header pagination and retrying transient HTTP failures.
    /// </summary>
    public class RepositoryProviderV2
    {
        #region Route constants
        private const string GetCatalogRoute = "https://{0}/v2/_catalog?n={1}";
        private const string GetTagsRoute = "https://{0}/v2/{1}/tags/list?n={2}";
        #endregion

        private const int pageSize = 100;

        #region Exponential backoff retry with jitter
        private readonly TimeSpan initWaitTime = TimeSpan.FromSeconds(2);
        private readonly int maxRetryCount = 4;
        private readonly IAsyncPolicy<HttpResponseMessage> retryPolicy;
        // BUGFIX: a new Random was previously allocated per retry delay; instances
        // created in quick succession can share a seed, and System.Random is not
        // thread-safe under concurrent retries. Use one locked shared instance.
        private static readonly Random s_jitterSource = new Random();
        #endregion

        private readonly HttpClient _httpClient;

        public RepositoryProviderV2()
        {
            _httpClient = new HttpClient();
            retryPolicy = HttpPolicyExtensions
                .HandleTransientHttpError()
                .WaitAndRetryAsync(
                    maxRetryCount,
                    retryAttempt => TimeSpan.FromSeconds(Math.Pow(initWaitTime.TotalSeconds, retryAttempt)) + NextJitter());
        }

        // 0-100 ms of jitter, drawn thread-safely from the shared Random.
        private static TimeSpan NextJitter()
        {
            lock (s_jitterSource)
            {
                return TimeSpan.FromMilliseconds(s_jitterSource.Next(0, 100));
            }
        }

        // Shared GET-with-retry used by both paging loops (previously duplicated inline).
        private Task<HttpResponseMessage> SendWithRetryAsync(Uri uri, string userName, string password)
        {
            return retryPolicy.ExecuteAsync(
                async () =>
                {
                    using (var request = new HttpRequestMessage(HttpMethod.Get, uri))
                    {
                        request.AddBasicAuth(userName, password);
                        return await _httpClient.SendAsync(request).ConfigureAwait(false);
                    }
                });
        }

        /// <summary>
        /// Returns all repository names in the registry, following pagination.
        /// Throws on any non-success status code.
        /// </summary>
        public async Task<List<string>> GetRepositoriesAsync(
            string registry,
            string userName,
            string password)
        {
            if (string.IsNullOrWhiteSpace(registry))
            {
                throw new ArgumentNullException(nameof(registry));
            }

            var repositories = new List<string>();
            var nextPageUri = new Uri(string.Format(GetCatalogRoute, registry, pageSize));
            while (nextPageUri != null)
            {
                var response = await SendWithRetryAsync(nextPageUri, userName, password).ConfigureAwait(false);
                if (response.IsSuccessStatusCode)
                {
                    var repoPage = JsonConvert.DeserializeObject<CatalogApiResponse>(await response.Content.ReadAsStringAsync())?.Repositories;
                    if (repoPage != null)
                    {
                        repositories.AddRange(repoPage);
                    }
                    nextPageUri = response.GetNextPageUri();
                }
                else
                {
                    var content = await response.Content.ReadAsStringAsync();
                    throw new Exception($"Registry:{registry} failed to list repositories. StatusCode:{response.StatusCode}, Reason:{response.ReasonPhrase}, Content:{content}");
                }
            }
            return repositories;
        }

        /// <summary>
        /// Returns all tags of one repository, following pagination.
        /// Throws on any non-success status code.
        /// </summary>
        public async Task<List<string>> GetTagsAsync(
            string registry,
            string repository,
            string userName,
            string password)
        {
            if (string.IsNullOrWhiteSpace(registry))
            {
                throw new ArgumentNullException(nameof(registry));
            }
            if (string.IsNullOrWhiteSpace(repository))
            {
                throw new ArgumentNullException(nameof(repository));
            }

            var tags = new List<string>();
            var nextPageUri = new Uri(string.Format(GetTagsRoute, registry, repository, pageSize));
            while (nextPageUri != null)
            {
                var response = await SendWithRetryAsync(nextPageUri, userName, password).ConfigureAwait(false);
                if (response.IsSuccessStatusCode)
                {
                    var tagPage = JsonConvert.DeserializeObject<TagListApiResponse>(await response.Content.ReadAsStringAsync())?.Tags;
                    if (tagPage != null)
                    {
                        tags.AddRange(tagPage);
                    }
                    nextPageUri = response.GetNextPageUri();
                }
                else
                {
                    var content = await response.Content.ReadAsStringAsync();
                    throw new Exception($"Registry:{registry} Repository:{repository} failed to list tags. StatusCode:{response.StatusCode}, Reason:{response.ReasonPhrase}, Content:{content}");
                }
            }
            return tags;
        }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/RepositoryProvider/TagListApiResponse.cs
================================================
using System.Collections.Generic;
using Newtonsoft.Json;

namespace RegistryArtifactTransfer
{
    /// <summary>Payload of the Docker Registry HTTP API v2 /v2/{name}/tags/list response.</summary>
    public class TagListApiResponse
    {
        [JsonProperty(PropertyName = "name")]
        public string Name;

        [JsonProperty(PropertyName = "tags")]
        public List<string> Tags;
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/ResourceId.cs
================================================
using System;

namespace RegistryArtifactTransfer
{
    /// <summary>
    /// Parses and renders ARM resource ids of the forms:
    ///   /subscriptions/{sub}/resourceGroups/{rg}
    ///   /subscriptions/{sub}/resourceGroups/{rg}/providers/{ns}/{type}/{name}
    ///   /subscriptions/{sub}/resourceGroups/{rg}/providers/{ns}/{type}/{name}/{childType}/{childName}
    /// Only single-level nested (child) resources are supported.
    /// </summary>
    public class ResourceId
    {
        public const string ContainerRegistryProviderNamespace = "Microsoft.ContainerRegistry";
        public const string RegistriesARMResourceType = ContainerRegistryProviderNamespace + "/registries";
        public const string ExportPipelineResourceType = "exportPipelines";
        public const string ImportPipelineResourceType = "importPipelines";
        public const string PipelineRunResourceType = "pipelineRuns";

        public string SubscriptionId { get; set; }
        public string ResourceGroupName { get; set; }
        public string ResourceName { get; set; }
        // "{providerNamespace}/{resourceType}", e.g. "Microsoft.ContainerRegistry/registries".
        public string ArmResourceType { get; set; }
        // Currently only single-level nested resources are supported.
        public string ChildResourceName { get; set; }
        public string ChildResourceType { get; set; }

        public ResourceId()
        {
        }

        public ResourceId(
            string subscriptionId,
            string resourceGroupName,
            string resourceName,
            string armResourceType)
        {
            this.SubscriptionId = subscriptionId ?? throw new ArgumentNullException(nameof(subscriptionId));
            this.ResourceGroupName = resourceGroupName ?? throw new ArgumentNullException(nameof(resourceGroupName));
            this.ResourceName = resourceName ?? throw new ArgumentNullException(nameof(resourceName));
            this.ArmResourceType = armResourceType ?? throw new ArgumentNullException(nameof(armResourceType));
            this.ChildResourceName = null;
            this.ChildResourceType = null;
        }

        public ResourceId(
            string subscriptionId,
            string resourceGroupName,
            string resourceName,
            string armResourceType,
            string childResourceType,
            string childResourceName)
            : this(
                subscriptionId,
                resourceGroupName,
                resourceName,
                armResourceType)
        {
            this.ChildResourceName = childResourceName ?? throw new ArgumentNullException(nameof(childResourceName));
            this.ChildResourceType = childResourceType ?? throw new ArgumentNullException(nameof(childResourceType));
        }

        /// <summary>Non-throwing variant of <see cref="Parse"/>.</summary>
        public static bool TryParse(string value, out ResourceId resourceId)
        {
            try
            {
                resourceId = Parse(value);
                return true;
            }
            catch (FormatException)
            {
                resourceId = default(ResourceId);
                return false;
            }
        }

        /// <summary>
        /// Parses one of the accepted resource-id forms.
        /// Throws <see cref="FormatException"/> on any shape mismatch.
        /// </summary>
        public static ResourceId Parse(string value)
        {
            if (value == null)
            {
                throw new ArgumentNullException(nameof(value));
            }

            var components = value.Split('/');

            // The accepted string values are:
            // "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"                                                        -> 5 components
            // ".../providers/{providerName}/{resourceType}/{resourceName}"                                                                -> 9 components
            // ".../providers/{providerName}/{resourceType}/{resourceName}/{childResourceType}/{childResourceName}"                        -> 11 components
            // (components[0] is the empty string before the leading '/').
            if ((components.Length != 5 && components.Length != 9 && components.Length != 11) ||
                !string.IsNullOrEmpty(components[0]) ||
                !string.Equals(components[1], "subscriptions", StringComparison.OrdinalIgnoreCase) ||
                !string.Equals(components[3], "resourceGroups", StringComparison.OrdinalIgnoreCase) ||
                (components.Length > 5 && !string.Equals(components[5], "providers", StringComparison.OrdinalIgnoreCase)))
            {
                throw new FormatException("Failed to parse a resource id from the input string: \"" + value + "\"");
            }

            var resourceId = new ResourceId()
            {
                SubscriptionId = components[2],
                ResourceGroupName = components[4]
            };
            if (components.Length > 5)
            {
                resourceId.ArmResourceType = string.Join("/", components[6], components[7]);
                resourceId.ResourceName = components[8];
            }
            if (components.Length > 9)
            {
                resourceId.ChildResourceType = components[9];
                resourceId.ChildResourceName = components[10];
            }
            return resourceId;
        }

        /// <summary>Renders the full id, including the child segment when present.</summary>
        public override string ToString()
        {
            var resourceIdString = GetParentResourceId();
            if (!string.IsNullOrEmpty(ChildResourceType))
            {
                resourceIdString = string.Join("/", resourceIdString, ChildResourceType, ChildResourceName);
            }
            return resourceIdString;
        }

        /// <summary>Renders the id without the child segment.</summary>
        public string GetParentResourceId()
        {
            var resourceIdString = string.Join("/", string.Empty, "subscriptions", SubscriptionId, "resourceGroups", ResourceGroupName);
            if (!string.IsNullOrEmpty(ArmResourceType))
            {
                resourceIdString = string.Join("/", resourceIdString, "providers", ArmResourceType, ResourceName);
            }
            return resourceIdString;
        }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/TaskExtensions.cs
================================================
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;

namespace RegistryArtifactTransfer
{
    public static class TaskExtensions
    {
        /// <summary>
        /// Runs <paramref name="operation"/> over every element with at most
        /// <paramref name="maxConcurrency"/> operations in flight, and completes
        /// when all of them have finished.
        /// </summary>
        public static async Task ThrottledWhenAll<T>(
            this IEnumerable<T> source,
            Func<T, Task> operation,
            int maxConcurrency)
        {
            var tasks = new List<Task>();
            using (var throttler = new SemaphoreSlim(maxConcurrency, maxConcurrency))
            {
                foreach (var element in source)
                {
                    // Acquire a slot before launching the next operation.
                    await throttler.WaitAsync().ConfigureAwait(false);
                    tasks.Add(Task.Run(async () =>
                    {
                        try
                        {
                            await operation(element).ConfigureAwait(false);
                        }
                        finally
                        {
                            throttler.Release();
                        }
                    }));
                }
                await Task.WhenAll(tasks).ConfigureAwait(false);
            }
        }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/Transfer/ArtifactProvider.cs
================================================
using Microsoft.Extensions.Logging;
using System;
using System.Collections.Generic;
using System.Threading.Tasks;

namespace RegistryArtifactTransfer
{
    public
class ArtifactProvider
    {
        private readonly ILogger _logger;
        private readonly RepositoryProviderV2 _repositoryProvider;

        public ArtifactProvider(ILogger logger)
        {
            // NOTE: redundant ": base()" removed — the implicit object ctor is called anyway.
            _logger = logger ?? throw new ArgumentNullException(nameof(logger));
            _repositoryProvider = new RepositoryProviderV2();
        }

        /// <summary>
        /// Lists all "repo:tag" artifact references in a registry whose repository
        /// matches one of <paramref name="repoFilters"/> (exact match or trailing-'*' prefix).
        /// </summary>
        public async Task<List<string>> GetArtifactsAsync(
            string registryUri,
            string userName,
            string password,
            List<string> repoFilters)
        {
            var artifacts = new List<string>();
            IEnumerable<string> repositories = await _repositoryProvider.GetRepositoriesAsync(
                registryUri,
                userName,
                password).ConfigureAwait(false);
            if (repositories != null)
            {
                foreach (var repo in repositories)
                {
                    if (Match(repo, repoFilters))
                    {
                        _logger.LogInformation($"Repository matched: {repo}");
                        IEnumerable<string> tags = await _repositoryProvider.GetTagsAsync(
                            registryUri,
                            repo,
                            userName,
                            password).ConfigureAwait(false);
                        if (tags != null)
                        {
                            foreach (var tag in tags)
                            {
                                artifacts.Add($"{repo.ToLowerInvariant()}:{tag}");
                            }
                        }
                    }
                }
            }
            return artifacts;
        }

        // Case-insensitive filter match: "prefix*" matches any repo starting with
        // "prefix"; anything else must match exactly.
        private static bool Match(
            string repo,
            List<string> repoFilters)
        {
            foreach (var filter in repoFilters)
            {
                if (filter.EndsWith('*'))
                {
                    var prefix = filter.Substring(0, filter.Length - 1);
                    if (repo.StartsWith(prefix, StringComparison.OrdinalIgnoreCase))
                    {
                        return true;
                    }
                }
                else if (string.Equals(repo, filter, StringComparison.OrdinalIgnoreCase))
                {
                    return true;
                }
            }
            return false;
        }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/Transfer/BlobCopier.cs
================================================
using Microsoft.Azure.Storage.Blob;
using Microsoft.Azure.Storage.DataMovement;
using Microsoft.Extensions.Logging;
using System;
using System.Threading;
using System.Threading.Tasks;

namespace RegistryArtifactTransfer
{
    /// <summary>
    /// Copies a blob between two SAS-addressed containers with the Data Movement
    /// library, using service-side async copy and overwriting any existing target.
    /// </summary>
    public class BlobCopier
    {
        private readonly ILogger _logger;
        private readonly CloudBlobContainer _sourceContainer;
        private readonly CloudBlobContainer _targetContainer;

        public BlobCopier(
            Uri sourceContainerSas,
            Uri targetContainerSas,
            ILogger logger)
        {
            _logger = logger ?? throw new ArgumentNullException(nameof(logger));
            _sourceContainer = new CloudBlobContainer(sourceContainerSas);
            _targetContainer = new CloudBlobContainer(targetContainerSas);
        }

        /// <summary>Copies one blob (same name in both containers).</summary>
        public async Task CopyAsync(
            string blobName,
            CancellationToken token = default(CancellationToken))
        {
            var sourceBlob = _sourceContainer.GetBlobReference(blobName);
            var targetBlob = _targetContainer.GetBlobReference(blobName);
            TransferCheckpoint checkpoint = null;
            SingleTransferContext context = GetSingleTransferContext(checkpoint, blobName);
            await TransferManager.CopyAsync(
                sourceBlob: sourceBlob,
                destBlob: targetBlob,
                copyMethod: CopyMethod.ServiceSideAsyncCopy,
                options: null,
                context: context,
                cancellationToken: token).ConfigureAwait(false);
        }

        // Context with force-overwrite and per-blob progress logging.
        private SingleTransferContext GetSingleTransferContext(
            TransferCheckpoint checkpoint,
            string blobName)
        {
            SingleTransferContext context = new SingleTransferContext(checkpoint);
            context.ShouldOverwriteCallbackAsync = TransferContext.ForceOverwrite;
            context.ProgressHandler = new Progress<TransferStatus>((progress) =>
            {
                _logger.LogInformation($"{blobName}: bytes transferred {progress.BytesTransferred}.");
            });
            return context;
        }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/Transfer/ExportJob.cs
================================================
using System.Collections.Generic;

namespace RegistryArtifactTransfer
{
    /// <summary>One pipeline run: a batch of images exported to a single blob.</summary>
    public class ExportJob
    {
        public string ExportPipelineName { get; set; }
        public string PipelineRunName { get; set; }
        public string ExportBlobName { get; set; }
        public List<string> Images { get; set; } = new List<string>();
        public TransferJobStatus Status { get; set; }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/Transfer/ExportWorker.cs
================================================
using Microsoft.Extensions.Logging;
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;

namespace RegistryArtifactTransfer
{
    /// <summary>
    /// Exports registry artifacts to storage blobs via an export pipeline, in
    /// throttled batches, and optionally copies the resulting blobs to another
    /// storage container.
    /// </summary>
    public class ExportWorker
    {
        private readonly ExportConfiguration _exportConfiguration;
        private readonly IdentityConfiguration _identityConfiguration;
        private readonly ILogger _logger;
        private readonly TransferClient _transferClient;
        // NOTE(review): _registry is constructed but not read anywhere in this class;
        // kept because building it also surfaces a null registryConfiguration early.
        private readonly Registry _registry;

        public ExportWorker(
            ExportConfiguration exportDefinition,
            RegistryConfiguration registryConfiguration,
            IdentityConfiguration identityConfiguration,
            TransferClient transferClient,
            ILogger logger)
        {
            _transferClient = transferClient ?? throw new ArgumentNullException(nameof(transferClient));
            _exportConfiguration = exportDefinition ?? throw new ArgumentNullException(nameof(exportDefinition));
            _identityConfiguration = identityConfiguration ?? throw new ArgumentNullException(nameof(identityConfiguration));
            _logger = logger ?? throw new ArgumentNullException(nameof(logger));
            _registry = new Registry(
                registryConfiguration.TenantId,
                registryConfiguration.SubscriptionId,
                registryConfiguration.ResourceGroupName,
                registryConfiguration.Name);
        }

        public async Task RunAsync(TransferReport transferReport)
        {
            if (!_exportConfiguration.Enabled)
            {
                return;
            }

            //
            // Include imported images
            var importedImages = new List<string>();
            if (_exportConfiguration.IncludeImportedArtifacts && transferReport.ImportArtifacts.Succeeded.Count > 0)
            {
                importedImages.AddRange(transferReport.ImportArtifacts.Succeeded);
            }

            // BUGFIX: Repositories/Tags may be absent from the configuration file;
            // CreateExportJobsAsync already null-checks them, so this early-out must too.
            if (importedImages.Count == 0 &&
                (_exportConfiguration.Repositories?.Count ?? 0) == 0 &&
                (_exportConfiguration.Tags?.Count ?? 0) == 0)
            {
                return;
            }

            //
            // Validate exportPipeline
            var exportPipeline = await _transferClient.GetExportPipelineAsync(_exportConfiguration.ExportPipelineName).ConfigureAwait(false);
            if (!exportPipeline.ProvisioningState.Equals("succeeded", StringComparison.OrdinalIgnoreCase))
            {
                throw new Exception($"ExportPipeline:{_exportConfiguration.ExportPipelineName} is in non-success provisioning state {exportPipeline.ProvisioningState}.");
            }

            //
            // Export images
            var exportJobs = await CreateExportJobsAsync(importedImages).ConfigureAwait(false);
            await exportJobs.ThrottledWhenAll(
                async (job) => await ExecuteExportAsync(job).ConfigureAwait(false),
                _exportConfiguration.MaxConcurrency).ConfigureAwait(false);
            foreach (var job in exportJobs)
            {
                if (job.Status == TransferJobStatus.Succeeded)
                {
                    transferReport.ExportArtifacts.Succeeded.AddRange(job.Images);
                    transferReport.ExportBlobs.Succeeded.Add(job.ExportBlobName);
                }
                else if (job.Status == TransferJobStatus.Failed)
                {
                    transferReport.ExportArtifacts.Failed.AddRange(job.Images);
                    transferReport.ExportBlobs.Failed.Add(job.ExportBlobName);
                }
            }

            //
            // Copy exported blobs
            if (_exportConfiguration.CopyBlobs != null &&
                _exportConfiguration.CopyBlobs.Enabled &&
                transferReport.ExportBlobs.Succeeded.Count > 0)
            {
                var sourceContainerSas = new Uri(await GetSourceSasUriAsync().ConfigureAwait(false));
                var targetContainerSas = new Uri(_exportConfiguration.CopyBlobs.DestContainerSasUri);
                var blobCopier = new BlobCopier(sourceContainerSas, targetContainerSas, _logger);
                foreach (var blobName in transferReport.ExportBlobs.Succeeded)
                {
                    // BUGFIX: removed the stray space before the period in this message.
                    _logger.LogInformation($"Blob {blobName}: starting to copy.");
                    try
                    {
                        await blobCopier.CopyAsync(blobName).ConfigureAwait(false);
                        transferReport.CopyBlobs.Succeeded.Add(blobName);
                        _logger.LogInformation($"Blob {blobName}: Successfully copied.");
                    }
                    catch (Exception e)
                    {
                        transferReport.CopyBlobs.Failed.Add(blobName);
                        _logger.LogError($"Blob {blobName}: failed to copy, exception: {e}.");
                    }
                }
            }
        }

        // Runs one pipeline run with a per-job timeout; records success/failure on the job.
        private async Task ExecuteExportAsync(ExportJob exportJob)
        {
            try
            {
                using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(_exportConfiguration.TransferTimeoutInSeconds)))
                {
                    await _transferClient.ExportImagesToStorageAsync(
                        exportJob.ExportPipelineName,
                        exportJob.PipelineRunName,
                        exportJob.Images,
                        exportJob.ExportBlobName,
                        cts.Token).ConfigureAwait(false);
                }
                exportJob.Status = TransferJobStatus.Succeeded;
                _logger.LogInformation($"PipelineRun {exportJob.PipelineRunName} succeeded.");
            }
            catch (Exception e)
            {
                exportJob.Status = TransferJobStatus.Failed;
                _logger.LogError($"PipelineRun {exportJob.PipelineRunName} failed, exception: {e}.");
            }
        }

        // Gathers the image list (imported + filter-matched + explicit tags) and
        // chunks it into jobs of at most MaxArtifactCountPerBlob images each.
        private async Task<List<ExportJob>> CreateExportJobsAsync(List<string> importedImages)
        {
            var exportJobs = new List<ExportJob>();
            var images = new List<string>();
            var importedImageCount = importedImages?.Count ?? 0;
            if (importedImageCount > 0)
            {
                images.AddRange(importedImages);
            }
            if (_exportConfiguration.Repositories != null && _exportConfiguration.Repositories.Count > 0)
            {
                var artifactProvider = new ArtifactProvider(_logger);
                var loginServer = await _transferClient.GetRegistryLoginServerAsync().ConfigureAwait(false);
                var matchedTags = await artifactProvider.GetArtifactsAsync(
                    loginServer,
                    _identityConfiguration.ClientId,
                    _identityConfiguration.ClientSecret,
                    _exportConfiguration.Repositories).ConfigureAwait(false);
                images.AddRange(matchedTags);
            }
            if (_exportConfiguration.Tags != null)
            {
                images.AddRange(_exportConfiguration.Tags);
            }

            _logger.LogInformation($"Total artifacts to export: {images.Count}.");

            // Chunk into batches; blob names are 1-based inclusive ranges,
            // e.g. "{prefix}From1To50" (same naming as the original while-loop).
            for (var start = 0; start < images.Count; start += _exportConfiguration.MaxArtifactCountPerBlob)
            {
                var endIndex = Math.Min(start + _exportConfiguration.MaxArtifactCountPerBlob - 1, images.Count - 1);
                var pipelineRunName = Guid.NewGuid().ToString("N");
                var targetBlobName = $"{_exportConfiguration.BlobNamePrefix}From{start+1}To{endIndex+1}";
                var job = new ExportJob
                {
                    PipelineRunName = pipelineRunName,
                    ExportPipelineName = _exportConfiguration.ExportPipelineName,
                    ExportBlobName = targetBlobName
                };
                for (var j = start; j <= endIndex; j++)
                {
                    job.Images.Add(images[j]);
                }
                exportJobs.Add(job);
            }
            return exportJobs;
        }

        // Source container URI comes from the export pipeline target; the SAS token
        // is supplied separately in configuration and appended here.
        private async Task<string> GetSourceSasUriAsync()
        {
            var exportPipeline = await _transferClient.GetExportPipelineAsync(_exportConfiguration.ExportPipelineName).ConfigureAwait(false);
            var blobContainerUri = exportPipeline?.Target?.Uri;
            if (string.IsNullOrWhiteSpace(blobContainerUri))
            {
                throw new Exception($"Invalid source blob container URI from export pipeline {exportPipeline}.");
            }
            var sasToken = _exportConfiguration.CopyBlobs.SourceSasToken;
            if (!sasToken.StartsWith('?'))
            {
                sasToken = "?" + sasToken;
            }
            return blobContainerUri + sasToken;
        }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/Transfer/ImportJob.cs
================================================
using System;
using System.Collections.Generic;

namespace RegistryArtifactTransfer
{
    /// <summary>One import unit: either a single registry image or one storage blob.</summary>
    public class ImportJob
    {
        public ImportSourceType SourceType { get; set; }
        public List<string> Images { get; set; } = new List<string>();
        public string ImportPipelineName { get; set; }
        public string PipelineRunName { get; set; }
        public string ImportBlobName { get; set; }
        public TransferJobStatus Status { get; set; } = TransferJobStatus.Pending;
    }

    public enum ImportSourceType
    {
        AzureStorageBlob,
        RegistryImage
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/Transfer/ImportWorker.cs
================================================
using Microsoft.Extensions.Logging;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;

namespace RegistryArtifactTransfer
{
    public class ImportWorker
    {
        private readonly ImportConfiguration _importConfiguration;
        private readonly ILogger _logger;
        private readonly TransferClient _transferClient;
        private readonly Registry _sourceRegistry;

        public ImportWorker(
            ImportConfiguration importConfiguration,
            RegistryConfiguration registryConfiguration,
            TransferClient transferClient,
            ILogger logger)
        {
            if (registryConfiguration == null)
            {
                throw new ArgumentNullException(nameof(registryConfiguration));
            }
            _importConfiguration = importConfiguration ?? throw new ArgumentNullException(nameof(importConfiguration));
            _transferClient = transferClient ?? throw new ArgumentNullException(nameof(transferClient));
            _logger = logger ??
throw new ArgumentNullException(nameof(logger));

            // The source registry comes in one of two mutually exclusive forms
            // (see SourceRegistryConfiguration.Validate): ARM ResourceId, or a
            // registry URI with optional credentials.
            if (!string.IsNullOrWhiteSpace(importConfiguration.SourceRegistry.ResourceId))
            {
                var resourceId = ResourceId.Parse(importConfiguration.SourceRegistry.ResourceId);
                _sourceRegistry = new Registry(
                    null,
                    resourceId.SubscriptionId,
                    resourceId.ResourceGroupName,
                    resourceId.ResourceName);
            }
            else
            {
                _sourceRegistry = new Registry(
                    importConfiguration.SourceRegistry.RegistryUri,
                    importConfiguration.SourceRegistry.UserName,
                    importConfiguration.SourceRegistry.Password);
            }
        }

        public async Task RunAsync(TransferReport transferReport)
        {
            if (!_importConfiguration.Enabled)
            {
                return;
            }

            //
            // Validate importPipeline (only needed when blobs are to be imported).
            // BUGFIX: Blobs may be absent from the configuration file — guard the dereference.
            if ((_importConfiguration.Blobs?.Count ?? 0) > 0)
            {
                var importPipeline = await _transferClient.GetImportPipelineAsync(_importConfiguration.ImportPipelineName).ConfigureAwait(false);
                if (!importPipeline.ProvisioningState.Equals("succeeded", StringComparison.OrdinalIgnoreCase))
                {
                    throw new Exception($"ImportPipeline:{_importConfiguration.ImportPipelineName} is in non-success provisioning state {importPipeline.ProvisioningState}.");
                }
            }

            var importJobs = await CreateImportJobs().ConfigureAwait(false);
            await importJobs.ThrottledWhenAll(
                async (job) => await ExecuteAsync(job).ConfigureAwait(false),
                _importConfiguration.MaxConcurrency).ConfigureAwait(false);
            foreach (var job in importJobs)
            {
                if (job.Status == TransferJobStatus.Succeeded)
                {
                    transferReport.ImportArtifacts.Succeeded.AddRange(job.Images);
                    if (job.SourceType == ImportSourceType.AzureStorageBlob)
                    {
                        transferReport.ImportBlobs.Succeeded.Add(job.ImportBlobName);
                    }
                }
                else if (job.Status == TransferJobStatus.Failed)
                {
                    transferReport.ImportArtifacts.Failed.AddRange(job.Images);
                    if (job.SourceType == ImportSourceType.AzureStorageBlob)
                    {
                        transferReport.ImportBlobs.Failed.Add(job.ImportBlobName);
                    }
                }
            }
        }

        // One job per registry image (filter-matched plus explicit tags) and one
        // job per storage blob.
        private async Task<List<ImportJob>> CreateImportJobs()
        {
            var importJobs = new List<ImportJob>();
            var images = new List<string>();
            if (_importConfiguration.Repositories != null)
            {
                var artifactProvider = new ArtifactProvider(_logger);
                var matchedTags = await artifactProvider.GetArtifactsAsync(
                    _importConfiguration.SourceRegistry.RegistryUri,
                    _importConfiguration.SourceRegistry.UserName,
                    _importConfiguration.SourceRegistry.Password,
                    _importConfiguration.Repositories).ConfigureAwait(false);
                images.AddRange(matchedTags);
            }
            if (_importConfiguration.Tags != null)
            {
                images.AddRange(_importConfiguration.Tags);
            }

            _logger.LogInformation($"Total registry artifacts to import: {images.Count}");
            foreach (var image in images)
            {
                var job = new ImportJob
                {
                    SourceType = ImportSourceType.RegistryImage,
                };
                job.Images.Add(image);
                importJobs.Add(job);
            }

            var blobs = _importConfiguration.Blobs;
            // BUGFIX: this count was previously logged via blobs.Count BEFORE the
            // null check below, throwing NullReferenceException when Blobs is absent.
            _logger.LogInformation($"Total storage blobs to import: {blobs?.Count ?? 0}");
            if (blobs != null && blobs.Count > 0)
            {
                foreach (var blob in blobs)
                {
                    var job = new ImportJob
                    {
                        SourceType = ImportSourceType.AzureStorageBlob,
                        ImportPipelineName = _importConfiguration.ImportPipelineName,
                        PipelineRunName = Guid.NewGuid().ToString("N"),
                        ImportBlobName = blob
                    };
                    importJobs.Add(job);
                }
            }
            return importJobs;
        }

        // Dispatches one job under the configured per-job timeout.
        private async Task ExecuteAsync(ImportJob importJob)
        {
            using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(_importConfiguration.TransferTimeoutInSeconds)))
            {
                if (importJob.SourceType == ImportSourceType.RegistryImage)
                {
                    await ExecuteImportImageAsync(importJob, cts.Token).ConfigureAwait(false);
                }
                else if (importJob.SourceType == ImportSourceType.AzureStorageBlob)
                {
                    await ExecutePipelineRunAsync(importJob, cts.Token).ConfigureAwait(false);
                }
            }
        }

        private async Task ExecuteImportImageAsync(ImportJob importJob, CancellationToken cancellationToken)
        {
            var image = importJob.Images.First();
            try
            {
                await _transferClient.ImportImageAsync(
                    _sourceRegistry,
                    image,
                    _importConfiguration.Force,
                    cancellationToken).ConfigureAwait(false);
                importJob.Status = TransferJobStatus.Succeeded;
                // BUGFIX: "Sucessfully" typo corrected in the log message.
                _logger.LogInformation($"Successfully imported {image}");
            }
            catch (Exception e)
            {
                importJob.Status = TransferJobStatus.Failed;
                _logger.LogError($"Failed to import: {image}, exception: {e}");
            }
        }

        private async Task ExecutePipelineRunAsync(ImportJob importJob, CancellationToken cancellationToken)
        {
            try
            {
                var importedImages = await _transferClient.ImportImagesFromStorageAsync(
                    importJob.ImportPipelineName,
                    importJob.PipelineRunName,
                    importJob.ImportBlobName,
                    cancellationToken).ConfigureAwait(false);
                importJob.Status = TransferJobStatus.Succeeded;
                if (importedImages != null && importedImages.Count > 0)
                {
                    foreach (var image in importedImages)
                    {
                        importJob.Images.Add(image);
                        // BUGFIX: "Sucessfully" typo corrected in the log message.
                        _logger.LogInformation($"Successfully imported {image}, pipelineRun:{importJob.PipelineRunName}, blob:{importJob.ImportBlobName}.");
                    }
                }
            }
            catch (Exception e)
            {
                importJob.Status = TransferJobStatus.Failed;
                _logger.LogError($"Failed to import blob:{importJob.ImportBlobName}, pipelineRun:{importJob.PipelineRunName}, exception: {e}");
            }
        }
    }
}
================================================
FILE: samples/dotnetcore/registry-artifact-transfer/src/Transfer/TransferClient.cs
================================================
using Microsoft.Azure.Management.ContainerRegistry;
using Microsoft.Azure.Management.ContainerRegistry.Models;
using Microsoft.Azure.Management.ResourceManager.Fluent;
using Microsoft.Azure.Management.ResourceManager.Fluent.Authentication;
using System;
using System.Collections.Generic;
using System.Threading;
using static RegistryArtifactTransfer.ResourceId;

namespace RegistryArtifactTransfer
{
    public class TransferClient
    {
        private const string ImportModeForce = "Force";
        private const string ImportModeNoForce = "NoForce";

        private readonly ContainerRegistryManagementClient _registryClient;
        private readonly RegistryConfiguration _registryConfiguration;

        public TransferClient(
            AzureEnvironmentConfiguration azureEnvironmentConfiguration,
            IdentityConfiguration identityConfiguration,
            RegistryConfiguration registryConfiguration)
        {
            _registryConfiguration = registryConfiguration ??
throw new ArgumentNullException(nameof(registryConfiguration)); var env = AzureEnvironment.FromName(azureEnvironmentConfiguration.Name); if (env == null) { env = new AzureEnvironment { Name = azureEnvironmentConfiguration.Name, AuthenticationEndpoint = azureEnvironmentConfiguration.AuthenticationEndpoint, ManagementEndpoint = azureEnvironmentConfiguration.ManagementEndpoint, ResourceManagerEndpoint = azureEnvironmentConfiguration.ResourceManagerEndpoint, GraphEndpoint = azureEnvironmentConfiguration.GraphEndpoint, KeyVaultSuffix = azureEnvironmentConfiguration.KeyVaultSuffix, StorageEndpointSuffix = azureEnvironmentConfiguration.StorageEndpointSuffix }; } var credential = new AzureCredentials( new ServicePrincipalLoginInformation { ClientId = identityConfiguration.ClientId, ClientSecret = identityConfiguration.ClientSecret }, registryConfiguration.TenantId, env); _registryClient = new ContainerRegistryManagementClient(credential.WithDefaultSubscription(registryConfiguration.SubscriptionId)); _registryClient.SubscriptionId = registryConfiguration.SubscriptionId; } public async System.Threading.Tasks.Task ImportImageAsync( Registry sourceRegistry, string sourceImage, bool force = true, CancellationToken cancellationToken = default(CancellationToken)) { if (sourceRegistry == null) { throw new ArgumentNullException(nameof(sourceRegistry)); } var sourceResourceId = sourceRegistry.ResourceId?.ToString(); var sourceLoginServer = string.IsNullOrEmpty(sourceResourceId) ? sourceRegistry.LoginServer : null; var importSource = new ImportSource() { ResourceId = sourceResourceId, RegistryUri = sourceLoginServer, SourceImage = sourceImage }; if (!string.IsNullOrEmpty(sourceRegistry.UserName)) { importSource.Credentials = new ImportSourceCredentials() { Username = sourceRegistry.UserName, Password = sourceRegistry.Password }; } var importImageParameters = new ImportImageParameters() { Mode = force ? 
ImportModeForce : ImportModeNoForce, Source = importSource, TargetTags = new List{sourceImage} }; importImageParameters.Validate(); await _registryClient.Registries.ImportImageAsync( _registryConfiguration.ResourceGroupName, _registryConfiguration.Name, importImageParameters, cancellationToken).ConfigureAwait(false); } public async System.Threading.Tasks.Task> ImportImagesFromStorageAsync( string importPipelineName, string pipelineRunName, string sourceBlobName, CancellationToken cancellationToken = default(CancellationToken)) { var request = CreateImportPipelineRunRequest(importPipelineName, sourceBlobName); var pipelineRun = await CreatePipelineRunAsync(pipelineRunName, request, cancellationToken).ConfigureAwait(false); return pipelineRun?.Response?.ImportedArtifacts; } public async System.Threading.Tasks.Task ExportImagesToStorageAsync( string exportPipelineName, string pipelineRunName, List images, string targetBlobName, CancellationToken cancellationToken = default(CancellationToken)) { var request = CreateExportPipelineRunRequest(exportPipelineName, images, targetBlobName); await CreatePipelineRunAsync(pipelineRunName, request, cancellationToken).ConfigureAwait(false); } public async System.Threading.Tasks.Task GetExportPipelineAsync( string exportPipelineName, CancellationToken cancellationToken = default(CancellationToken)) { return await _registryClient.ExportPipelines.GetAsync( _registryConfiguration.ResourceGroupName, _registryConfiguration.Name, exportPipelineName, cancellationToken).ConfigureAwait(false); } public async System.Threading.Tasks.Task GetImportPipelineAsync( string importPipelineName, CancellationToken cancellationToken = default(CancellationToken)) { return await _registryClient.ImportPipelines.GetAsync( _registryConfiguration.ResourceGroupName, _registryConfiguration.Name, importPipelineName, cancellationToken).ConfigureAwait(false); } public async System.Threading.Tasks.Task GetRegistryLoginServerAsync( CancellationToken 
cancellationToken = default(CancellationToken)) { var registry = await _registryClient.Registries.GetAsync( _registryConfiguration.ResourceGroupName, _registryConfiguration.Name, cancellationToken).ConfigureAwait(false); return registry?.LoginServer; } private async System.Threading.Tasks.Task CreatePipelineRunAsync( string pipelineRunName, PipelineRunRequest request, CancellationToken cancellationToken = default(CancellationToken)) { return await _registryClient.PipelineRuns.CreateAsync( _registryConfiguration.ResourceGroupName, _registryConfiguration.Name, pipelineRunName, request, forceUpdateTag: null, cancellationToken).ConfigureAwait(false); } private PipelineRunRequest CreateImportPipelineRunRequest( string importPipelineName, string sourceBlobName) { var importPipelineResourceId = new ResourceId( _registryConfiguration.SubscriptionId, _registryConfiguration.ResourceGroupName, _registryConfiguration.Name, RegistriesARMResourceType, ImportPipelineResourceType, importPipelineName); return new PipelineRunRequest { PipelineResourceId = importPipelineResourceId.ToString(), Source = new PipelineRunSourceProperties { Type = PipelineRunTargetType.AzureStorageBlob.ToString(), Name = sourceBlobName } }; } private PipelineRunRequest CreateExportPipelineRunRequest( string exportPipelineName, List images, string targetBlobName) { var exportPipelineResourceId = new ResourceId( _registryConfiguration.SubscriptionId, _registryConfiguration.ResourceGroupName, _registryConfiguration.Name, RegistriesARMResourceType, ExportPipelineResourceType, exportPipelineName); return new PipelineRunRequest { PipelineResourceId = exportPipelineResourceId.ToString(), Artifacts = images, Target = new PipelineRunTargetProperties { Type = PipelineRunTargetType.AzureStorageBlob.ToString(), Name = targetBlobName } }; } } } ================================================ FILE: samples/dotnetcore/registry-artifact-transfer/src/Transfer/TransferJobStatus.cs 
namespace RegistryArtifactTransfer
{
    /// <summary>
    /// Terminal state of a single transfer job.
    /// </summary>
    public enum TransferJobStatus
    {
        // Job created but not yet executed.
        Pending,
        // Transfer completed without error.
        Succeeded,
        // Transfer threw or timed out.
        Failed
    }

    /// <summary>
    /// Aggregated results for one full transfer session, split by operation kind.
    /// All properties are initialized so callers can append without null checks.
    /// </summary>
    public class TransferReport
    {
        public TransferResult ImportArtifacts { get; set; } = new TransferResult();
        public TransferResult ImportBlobs { get; set; } = new TransferResult();
        public TransferResult ExportArtifacts { get; set; } = new TransferResult();
        public TransferResult ExportBlobs { get; set; } = new TransferResult();
        public TransferResult CopyBlobs { get; set; } = new TransferResult();
    }

    /// <summary>
    /// Names of the items (image references or blob names) that succeeded or failed.
    /// Generic type arguments were stripped by the text extraction ("List Succeeded");
    /// restored here as List&lt;string&gt;, matching how ImportJob/ExportJob add items.
    /// </summary>
    public class TransferResult
    {
        public List<string> Succeeded { get; set; } = new List<string>();
        public List<string> Failed { get; set; } = new List<string>();
    }
}
"exportedBlob1", "exportedBlob2", "exportedBlob3" ] }, "Export": { "Enabled": true, "ExportPipelineName": "myExportPipelineName", "MaxArtifactCountPerBlob": 50, "BlobNamePrefix": "artifacts", "IncludeImportedArtifacts": true, "Repositories": [ "targetRepo", "targetRepoPrefix*" ], "Tags": [ "targetRepo1:tag1", "targetRepo2:tag2" ], "CopyBlobs": { "Enabled": true, "SourceSasToken": "sourceBlobContainerSasToken", "DestContainerSasUri": "destinationBlobContainerSasUri" } } } ================================================ FILE: samples/java/task/.factorypath ================================================ ================================================ FILE: samples/java/task/.gitignore ================================================ *.class # Auth filed *.auth *.azureauth # Mobile Tools for Java (J2ME) .mtj.tmp/ # Package Files # *.jar *.war *.ear # Azure Tooling # node_modules packages # Eclipse # *.pydevproject .project .metadata bin/** tmp/** tmp/**/* *.tmp *.bak *.swp *~.nib local.properties .classpath .settings/ .loadpath # Other Tooling # .classpath .project target/ .idea *.iml # Mac OS # .DS_Store .DS_Store? # Windows # Thumbs.db # reduced pom files should not be included dependency-reduced-pom.xml ================================================ FILE: samples/java/task/Dockerfile ================================================ FROM maven:3.5.4-jdk-8 COPY . . RUN mvn clean compile ================================================ FILE: samples/java/task/README.md ================================================ ## Getting Started with Container Registry - Manage Container Registry Task - in Java ## * Create an Azure Container Registry. * Schedule a new run to build a container image and push it to the registry. * Wait for the run completion and download the run log. * Create a task and queue a new run using the task. * Schedule a new multi-step task run. * List runs in the registry. 
## Running this Sample ## To run this sample: Set the environment variable `AZURE_AUTH_LOCATION` with the full path for an auth file. See [how to create an auth file](https://github.com/Azure/azure-libraries-for-java/blob/master/AUTH.md). mvn clean compile exec:java ## More information ## [http://azure.com/java](http://azure.com/java) If you don't have a Microsoft Azure subscription you can get a FREE trial account [here](http://go.microsoft.com/fwlink/?LinkId=330212). ================================================ FILE: samples/java/task/acb.yaml ================================================ version: v1.1.0 steps: - build: -t $Registry/java-sample:$ID . - push: - $Registry/java-sample:$ID ================================================ FILE: samples/java/task/pom.xml ================================================ 4.0.0 com.microsoft.azure containerrregistry-task-sample 1.0-SNAPSHOT jar containerregistry-task-sample http://maven.apache.org UTF-8 1.8 1.8 ossrh Sonatype Snapshots https://oss.sonatype.org/content/repositories/snapshots/ default true always io.reactivex rxjava 1.2.4 com.microsoft.azure.containerregistry.v2018_09_01 azure-mgmt-containerregistry 1.0.0-beta-1 com.microsoft.azure azure-client-authentication 1.5.0 com.microsoft.azure azure-client-runtime 1.5.0 com.microsoft.azure azure-arm-client-runtime 1.5.0 org.slf4j slf4j-simple 1.7.21 com.microsoft.azure azure-mgmt-resources 1.9.0 junit junit 4.13.1 test org.apache.httpcomponents httpclient 4.5.13 ================================================ FILE: samples/java/task/src/main/java/com/microsoft/azure/management/containerregistry/samples/ManageTask.java ================================================ package com.microsoft.azure.management.containerregistry.samples; import com.microsoft.azure.arm.resources.Region; import com.microsoft.azure.arm.utils.SdkContext; import com.microsoft.azure.credentials.AzureCliCredentials; import com.microsoft.azure.management.containerregistry.v2018_09_01.*; 
import com.microsoft.azure.management.containerregistry.v2018_09_01.implementation.ContainerRegistryManager;
import com.microsoft.azure.management.resources.ResourceGroup;
import com.microsoft.azure.management.resources.implementation.ResourceManager;
import com.microsoft.rest.LogLevel;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.time.LocalDateTime;
import java.util.Arrays;

/**
 * End-to-end ACR Tasks sample: creates a registry, schedules a docker build run,
 * streams its log, defines a triggered task, queues runs from the task and a
 * multi-step task file, lists runs, and finally deletes the resource group.
 * NOTE(review): this sample creates and deletes real Azure resources and blocks
 * on each Rx observable (toBlocking().first()) — it is meant to run as a script.
 */
public class ManageTask {
    public static void main( String[] args ) throws IOException, InterruptedException {
        String region = Region.US_EAST.toString();
        // Randomized names avoid collisions across repeated sample runs.
        String rgName = SdkContext.randomResourceName("rg", 20);
        String acrName = SdkContext.randomResourceName("acr", 20);

        // Read the Azure credential from the Azure CLI login context.
        // See how to create an auth file: https://github.com/Azure/azure-libraries-for-java/blob/master/AUTH.md
        AzureCliCredentials credentials = AzureCliCredentials.create();

        // Create a new resource group
        ResourceManager resourceManager = ResourceManager
                .configure()
                .withLogLevel(LogLevel.BASIC)
                .authenticate(credentials)
                .withSubscription(credentials.defaultSubscriptionId());
        ResourceGroup resourceGroup = resourceManager.resourceGroups()
                .define(rgName)
                .withRegion(region)
                .create();
        System.out.printf("New resource group: %s\n", rgName);

        // Create a new Azure Container Registry (Basic SKU)
        ContainerRegistryManager manager = ContainerRegistryManager
                .configure()
                .withLogLevel(LogLevel.BASIC)
                .authenticate(credentials, credentials.defaultSubscriptionId());
        Registry registry = manager.registries().define(acrName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withSku(new Sku().withName(SkuName.BASIC))
                .create();
        System.out.printf("New registry: %s\n", registry.name());

        // Build a container image using an existing github repository and push the
        // image to the registry. {{.Run.ID}} is substituted by ACR Tasks at run time.
        RunRequest runRequest = new DockerBuildRequest()
                .withImageNames(Arrays.asList("java-sample:{{.Run.ID}}"))
                .withIsPushEnabled(true)
                .withPlatform(new PlatformProperties().withOs(OS.LINUX).withArchitecture(Architecture.AMD64))
                .withSourceLocation("https://github.com/Azure/acr.git#master:samples/java/task")
                .withDockerFilePath("Dockerfile")
                .withTimeout(60*10) // 10-minute build timeout, in seconds
                .withAgentConfiguration(new AgentProperties().withCpu(2));
        Run run = manager.registries().scheduleRunAsync(rgName, acrName, runRequest).toBlocking().first();
        String runId = run.runId();
        System.out.printf("New run: %s\n", runId);

        // Poll the run status every 10 seconds until it leaves the in-progress states.
        while (runInProgress(run.status())) {
            System.out.printf("%tT: In progress: %s. Wait 10 seconds\n", LocalDateTime.now(), run.status());
            Thread.sleep(10000);
            run = manager.runs().getAsync(rgName, acrName, runId).toBlocking().first();
        }

        // Get the SAS-protected log link and stream the run log to stdout.
        RunGetLogResult logResult = manager.runs().getLogSasUrlAsync(rgName, acrName, runId).toBlocking().first();
        String logLink = logResult.logLink();
        try (CloseableHttpClient httpClient = HttpClientBuilder.create().build()) {
            HttpGet logRequest = new HttpGet(logLink);
            HttpResponse logResponse = httpClient.execute(logRequest);
            BufferedReader bufferReader = new BufferedReader(
                    new InputStreamReader(logResponse.getEntity().getContent()));
            String line;
            while ((line = bufferReader.readLine()) != null) {
                System.out.println(line);
            }
        }

        // Create a task to automatically schedule runs on push commits and pull requests.
        // The placeholder strings below must be replaced with real values before this
        // section can execute successfully.
        String githubRepoUrl = "Replace with your github repository url, eg: https://github.com/Azure/acr.git";
        String githubContext = "Replace with your github repository url with context, eg: https://github.com/Azure/acr.git#master:samples/java/task";
        String githubBranch = "Replace with your github repositoty branch, eg: master";
        String githubPAT = "Replace with your github personal access token which should have the scopes: admin:repo_hook and repo";
        String dockerFilePath = "Replace with your docker file path relative to githubContext, eg: Dockerfile";

        PlatformProperties platform = new PlatformProperties()
                .withOs(OS.LINUX)
                .withArchitecture(Architecture.AMD64);
        TaskStepProperties step = new DockerBuildStep()
                .withImageNames(Arrays.asList("java-sample:{{.Run.ID}}"))
                .withDockerFilePath(dockerFilePath)
                .withContextPath(githubContext);
        // Re-run automatically when the base image is updated at runtime.
        BaseImageTrigger baseImageTrigger = new BaseImageTrigger()
                .withName("SampleBaseImageTrigger")
                .withBaseImageTriggerType(BaseImageTriggerType.RUNTIME);
        // Re-run on commits and pull requests; requires a GitHub PAT for the webhook.
        SourceTrigger sourceTrigger = new SourceTrigger()
                .withName("SampleSourceTrigger")
                .withSourceRepository(new SourceProperties()
                        .withSourceControlType(SourceControlType.GITHUB)
                        .withBranch(githubBranch)
                        .withRepositoryUrl(githubRepoUrl)
                        .withSourceControlAuthProperties(new AuthInfo().withTokenType(TokenType.PAT).withToken(githubPAT)))
                .withSourceTriggerEvents(Arrays.asList(SourceTriggerEvent.COMMIT, SourceTriggerEvent.PULLREQUEST))
                .withStatus(TriggerStatus.ENABLED);
        TriggerProperties trigger = new TriggerProperties()
                .withBaseImageTrigger(baseImageTrigger)
                .withSourceTriggers(Arrays.asList(sourceTrigger));
        AgentProperties agentConfiguration = new AgentProperties().withCpu(2);
        Task task = manager.tasks().define("SampleTask")
                .withExistingRegistry(rgName, acrName)
                .withLocation(region)
                .withPlatform(platform)
                .withStep(step)
                .withTrigger(trigger)
                .withAgentConfiguration(agentConfiguration)
                .create();

        // After you create the task, you can push a change or create a pull request to your github repository to trigger a run
        // The following code manually triggers a new run using the task
        runRequest = new TaskRunRequest()
                .withTaskName(task.name());
        run = manager.registries().scheduleRunAsync(rgName, acrName, runRequest).toBlocking().first();
        System.out.printf("New run: %s\n", run.runId());

        // Schedule a multi-step task run defined by the acb.yaml task file.
        runRequest = new FileTaskRunRequest()
                .withPlatform(new PlatformProperties().withOs(OS.LINUX))
                .withSourceLocation("https://github.com/Azure/acr.git#master:samples/java/task")
                .withTaskFilePath("acb.yaml")
                .withTimeout(60*10)
                .withAgentConfiguration(new AgentProperties().withCpu(2));
        run = manager.registries().scheduleRunAsync(rgName, acrName, runRequest).toBlocking().first();
        System.out.printf("New run: %s\n", run.runId());

        // List all runs in the registry
        System.out.println("List runs:");
        manager.runs().listAsync(rgName, acrName).toBlocking().forEach(r ->
                System.out.printf("Run: %s: %s\n", r.runId(), r.status().toString()));

        // Clean up: deleting the resource group removes the registry and task.
        resourceManager.resourceGroups().deleteByName(resourceGroup.name());
    }

    /**
     * Returns true while the run has not yet reached a terminal state.
     * Reference equality is used because RunStatus exposes fixed singleton constants.
     */
    private static boolean runInProgress(RunStatus runStatus) {
        return runStatus == RunStatus.QUEUED || runStatus == RunStatus.STARTED || runStatus == RunStatus.RUNNING;
    }
}