@ -0,0 +1,92 @@ | |||
# Template: builds the service container images for PR validation, and for
# CI builds also pushes them to the registry and publishes the helm charts.
parameters:
  services: ''          # space-separated docker-compose services to build/push
  registryEndpoint: ''  # Docker registry service connection name
  helmfrom: ''          # source folder of the helm charts
  helmto: ''            # staging folder for the "helm" artifact

jobs:
# PR validation jobs: only build the images, never push.
- job: BuildContainersForPR_Linux
  condition: eq('${{ variables['Build.Reason'] }}', 'PullRequest')
  pool:
    vmImage: 'ubuntu-16.04'
  steps:
  - bash: docker-compose build ${{ parameters.services }}
    # fix: step was mislabeled "Create multiarch manifest"; it runs a compose build
    displayName: Compose build ${{ parameters.services }}
    env:
      TAG: ${{ variables['Build.SourceBranchName'] }}
- job: BuildContainersForPR_Windows
  condition: eq('${{ variables['Build.Reason'] }}', 'PullRequest')
  pool:
    vmImage: 'windows-2019'
  steps:
  - bash: docker-compose build ${{ parameters.services }}
    displayName: Compose build ${{ parameters.services }}
    env:
      TAG: ${{ variables['Build.SourceBranchName'] }}
      PLATFORM: win
      NODE_IMAGE: stefanscherer/node-windows:10
# CI jobs (non-PR): build and push the images, then publish the helm charts.
- job: BuildLinux
  condition: ne('${{ variables['Build.Reason'] }}', 'PullRequest')
  pool:
    vmImage: 'ubuntu-16.04'
  steps:
  - task: DockerCompose@0
    displayName: Compose build ${{ parameters.services }}
    inputs:
      dockerComposeCommand: 'build ${{ parameters.services }}'
      containerregistrytype: Container Registry
      dockerRegistryEndpoint: ${{ parameters.registryEndpoint }}
      dockerComposeFile: docker-compose.yml
      qualifyImageNames: true
      projectName: ""
      dockerComposeFileArgs: |
        TAG=${{ variables['Build.SourceBranchName'] }}
  - task: DockerCompose@0
    # fix: referenced the undefined parameter `images`; this template only declares `services`
    displayName: Compose push ${{ parameters.services }}
    inputs:
      dockerComposeCommand: 'push ${{ parameters.services }}'
      containerregistrytype: Container Registry
      dockerRegistryEndpoint: ${{ parameters.registryEndpoint }}
      dockerComposeFile: docker-compose.yml
      qualifyImageNames: true
      projectName: ""
      dockerComposeFileArgs: |
        TAG=${{ variables['Build.SourceBranchName'] }}
  - task: CopyFiles@2
    inputs:
      sourceFolder: ${{ parameters.helmfrom }}
      targetFolder: ${{ parameters.helmto }}
  - task: PublishBuildArtifacts@1
    inputs:
      pathtoPublish: ${{ parameters.helmto }}
      artifactName: helm
- job: BuildWindows
  condition: ne('${{ variables['Build.Reason'] }}', 'PullRequest')
  pool:
    vmImage: 'windows-2019'
  steps:
  - task: DockerCompose@0
    displayName: Compose build ${{ parameters.services }}
    inputs:
      dockerComposeCommand: 'build ${{ parameters.services }}'
      containerregistrytype: Container Registry
      dockerRegistryEndpoint: ${{ parameters.registryEndpoint }}
      dockerComposeFile: docker-compose.yml
      qualifyImageNames: true
      projectName: ""
      dockerComposeFileArgs: |
        TAG=${{ variables['Build.SourceBranchName'] }}
        PLATFORM=win
        NODE_IMAGE=stefanscherer/node-windows:10
  - task: DockerCompose@0
    displayName: Compose push ${{ parameters.services }}
    inputs:
      dockerComposeCommand: 'push ${{ parameters.services }}'
      containerregistrytype: Container Registry
      dockerRegistryEndpoint: ${{ parameters.registryEndpoint }}
      dockerComposeFile: docker-compose.yml
      qualifyImageNames: true
      projectName: ""
      dockerComposeFileArgs: |
        TAG=${{ variables['Build.SourceBranchName'] }}
        PLATFORM=win
@ -0,0 +1,25 @@ | |||
# CI pipeline that packages the shared data-infrastructure helm charts
# (basket-data, keystore-data, nosql-data, rabbitmq, sql-data) and publishes
# them as a build artifact named "helm".
pool:
  vmImage: 'ubuntu-16.04'
variables:
  registryEndpoint: eshop-registry
# Run only when the listed chart folders change on master or dev.
trigger:
  branches:
    include:
    - master
    - dev
  paths:
    include:
    - k8s/helm/basket-data/*
    - k8s/helm/keystore-data/*
    - k8s/helm/nosql-data/*
    - k8s/helm/rabbitmq/*
    - k8s/helm/sql-data/*
steps:
# Stage the chart sources, then publish the staged folder as the artifact.
- task: CopyFiles@2
  inputs:
    sourceFolder: $(Build.SourcesDirectory)/k8s/helm
    targetFolder: $(Build.ArtifactStagingDirectory)/k8s/helm
- task: PublishBuildArtifacts@1
  inputs:
    pathtoPublish: $(Build.ArtifactStagingDirectory)/k8s/helm
    artifactName: helm
@ -0,0 +1,30 @@ | |||
# Template: after the linux and windows image builds finish, create and push a
# multi-arch manifest that joins the linux-* and win-* tags of one image.
parameters:
  image: ''             # image repository name (without registry)
  branch: ''            # branch tag to combine (e.g. dev, master)
  registry: 'eshop'
  registryEndpoint: ''  # Docker registry service connection name

jobs:
- job: manifest
  condition: and(succeeded(),ne('${{ variables['Build.Reason'] }}', 'PullRequest'))
  dependsOn:
  - BuildWindows
  - BuildLinux
  pool:
    # fix: was the friendly name 'Ubuntu 16.04', which is deprecated and
    # inconsistent with the 'ubuntu-16.04' pool name used by the other jobs
    vmImage: 'ubuntu-16.04'
  steps:
  - task: Docker@1
    displayName: Docker Login
    inputs:
      command: login
      containerregistrytype: 'Container Registry'
      dockerRegistryEndpoint: ${{ parameters.registryEndpoint }}
  - bash: |
      mkdir -p ~/.docker
      # enable the experimental CLI (required by `docker manifest`) by
      # stripping the closing brace of config.json and appending the flag
      sed '$ s/.$//' $DOCKER_CONFIG/config.json > ~/.docker/config.json
      echo ',"experimental": "enabled" }' >> ~/.docker/config.json
      docker --config ~/.docker manifest create ${{ parameters.registry }}/${{ parameters.image }}:${{ parameters.branch }} ${{ parameters.registry }}/${{ parameters.image }}:linux-${{ parameters.branch }} ${{ parameters.registry }}/${{ parameters.image }}:win-${{ parameters.branch }}
      docker --config ~/.docker manifest create ${{ parameters.registry }}/${{ parameters.image }}:latest ${{ parameters.registry }}/${{ parameters.image }}:linux-latest ${{ parameters.registry }}/${{ parameters.image }}:win-latest
      docker --config ~/.docker manifest push ${{ parameters.registry }}/${{ parameters.image }}:${{ parameters.branch }}
      docker --config ~/.docker manifest push ${{ parameters.registry }}/${{ parameters.image }}:latest
    displayName: Create multiarch manifest
@ -0,0 +1,26 @@ | |||
# Creates and pushes multi-arch docker manifests (:master, :dev, :latest) for
# every eShopOnContainers service image in the given registry, combining the
# platform-specific linux-* and win-* tags. Requires the docker CLI with
# experimental features enabled (`docker manifest`).
Param(
    [parameter(Mandatory=$true)][string]$registry
)

# NOTE(review): redundant — a Mandatory string parameter cannot be empty;
# kept as a defensive guard.
if ([String]::IsNullOrEmpty($registry)) {
    Write-Host "Registry must be set to docker registry to use" -ForegroundColor Red
    exit 1
}

Write-Host "This script creates the local manifests, for pushing the multi-arch manifests" -ForegroundColor Yellow
Write-Host "Tags used are linux-master, win-master, linux-dev, win-dev, linux-latest, win-latest" -ForegroundColor Yellow
Write-Host "Multiarch images tags will be master, dev, latest" -ForegroundColor Yellow

# All service images that get a multi-arch manifest.
$services = "identity.api", "basket.api", "catalog.api", "ordering.api", "ordering.backgroundtasks", "marketing.api", "payment.api", "locations.api", "webhooks.api", "ocelotapigw", "mobileshoppingagg", "webshoppingagg", "ordering.signalrhub", "webstatus", "webspa", "webmvc", "webhooks.client"

foreach ($svc in $services) {
    Write-Host "Creating manifest for $svc and tags :latest, :master, and :dev"
    docker manifest create $registry/${svc}:master $registry/${svc}:linux-master $registry/${svc}:win-master
    docker manifest create $registry/${svc}:dev $registry/${svc}:linux-dev $registry/${svc}:win-dev
    docker manifest create $registry/${svc}:latest $registry/${svc}:linux-latest $registry/${svc}:win-latest
    Write-Host "Pushing manifest for $svc and tags :latest, :master, and :dev"
    docker manifest push $registry/${svc}:latest
    docker manifest push $registry/${svc}:dev
    docker manifest push $registry/${svc}:master
}
@ -0,0 +1,2 @@ | |||
#Requires -RunAsAdministrator
# Marks the DockerNAT network interface as a Private network, so Windows
# firewall profiles allow host <-> container traffic. Must run elevated.
Get-NetConnectionProfile | Where-Object { $_.InterfaceAlias -match "(DockerNAT)" } | ForEach-Object { Set-NetConnectionProfile -InterfaceIndex $_.InterfaceIndex -NetworkCategory Private }
@ -0,0 +1,37 @@ | |||
# ELK overlay: adds Elasticsearch, Logstash and Kibana containers for the
# centralized-logging scenario (combined with the main compose files).
version: '3.4'

services:
  elasticsearch:
    build:
      context: elk/elasticsearch/
    volumes:
      - ./elk/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      # small JVM heap for local development
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
  logstash:
    build:
      context: elk/logstash/
    volumes:
      - ./elk/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
      - ./elk/logstash/pipeline:/usr/share/logstash/pipeline:ro
    ports:
      # HTTP input where the services' Serilog sink posts log events
      - "8080:8080"
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    depends_on:
      - elasticsearch
  kibana:
    build:
      context: elk/kibana/
    volumes:
      - ./elk/kibana/config/:/usr/share/kibana/config:ro
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch
@ -1,8 +0,0 @@ | |||
#!/bin/sh
# Downloads a pinned Node.js linux-x64 tarball, verifies its SHA-256 checksum,
# and installs it under /usr/local. NODE_VERSION must be set by the caller.
# fix: `export NAME value` is invalid POSIX sh (the value is treated as a
# second variable name, so NODE_DOWNLOAD_SHA stayed empty and the checksum
# verification could never match); use NAME=value with export.
export NODE_DOWNLOAD_SHA=0e20787e2eda4cc31336d8327556ebc7417e8ee0a6ba0de96a09b0ec2b841f60
curl -SL "https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.gz" --output nodejs.tar.gz \
  && echo "$NODE_DOWNLOAD_SHA nodejs.tar.gz" | sha256sum -c - \
  && tar -xzf "nodejs.tar.gz" -C /usr/local --strip-components=1 \
  && rm nodejs.tar.gz \
  && ln -s /usr/local/bin/node /usr/local/bin/nodejs
@ -1,4 +0,0 @@ | |||
REM Installs a pinned Node.js version on Windows: downloads the zip, extracts
REM it to C:\ and appends the node folder to the user PATH.
set NODE_VERSION=8.11.1
curl -SL "https://nodejs.org/dist/v%NODE_VERSION%/node-v%NODE_VERSION%-win-x64.zip" --output nodejs.zip
tar -xf nodejs.zip -C c:\
setx PATH "%PATH%;c:\node-v%NODE_VERSION%-win-x64"
@ -0,0 +1,88 @@ | |||
This article contains a brief introduction to centralized structured logging with [Serilog](https://serilog.net/) and event viewing with [ELK](https://www.elastic.co/elk-stack) in eShopOnContainers. ELK is an acronym of ElasticSearch, LogStash and Kibana. This is one of the most used tools in the industry standards. | |||
![](img/elk/kibana-working.png) | |||
## Wiring eshopOnContainers with ELK in Localhost | |||
eShopOnContainers is ready to work with ELK; you only need to set the configuration parameter **LogstashUrl** in the **Serilog** section. To achieve this, you can modify that parameter in the appsettings.json of every service, or set it via the environment variable **Serilog:LogstashUrl**.
There is another option, a zero-configuration environment for testing the integration launching via ```docker-compose``` command, on the root directory of eshopOnContainers: | |||
```sh | |||
docker-compose -f docker-compose.yml -f docker-compose.override.yml -f docker-compose.elk.yml build | |||
docker-compose -f docker-compose.yml -f docker-compose.override.yml -f docker-compose.elk.yml up | |||
``` | |||
### Configuring Logstash index on Kibana | |||
Once you have started and configured your application, you only need to configure the Logstash index in Kibana.
You can address to Kibana, with docker-compose setup is at [http://localhost:5601](http://localhost:5601) | |||
If you have accessed to kibana too early, you can see this error. It's normal, depending of your machine the kibana stack needs a bit of time to startup. | |||
![](img/elk/kibana_startup.png) | |||
You can wait a bit and refresh the page. The first time you enter, you need to configure an index pattern; in the ```docker-compose``` configuration, the index pattern name is **eshops-\***.
![](img/elk/kibana_eshops_index.png) | |||
With the index pattern configured, you can enter in the discover section and start viewing how the tool is recollecting the logging information. | |||
![](img/elk/kibana_result.png) | |||
## Configuring ELK on Azure VM | |||
Another option is to use a preconfigured virtual machine with Logstash, ElasticSearch and Kibana, and point the **LogstashUrl** configuration parameter at it. To do this, go to Microsoft Azure and search for a certified ELK virtual machine.
![](img/elk/create-vm-elk-azure.png) | |||
This option comes with certified, preconfigured settings (network, virtual machine size, OS, RAM, disks), giving you a good starting point for a well-performing ELK deployment.
![](img/elk/create-vm-elk-azure-summary.png) | |||
When you have configured the main aspects of your virtual machine, you will have a "review & create" last step like this: | |||
![](img/elk/create-vm-elk-azure-last-step.png) | |||
### Configuring the bitnami environment | |||
This virtual machine has a lot of configuration pipeing done. If you want to change something of the default configuration you can address this documentation: | |||
[https://docs.bitnami.com/virtual-machine/apps/elk/get-started/](https://docs.bitnami.com/virtual-machine/apps/elk/get-started/) | |||
The only thing you have to change is the logstash configuration inside the machine. This configuration is at the file ```/opt/bitnami/logstash/conf/logstash.conf``` | |||
You must edit the file and overwrite with this configuration: | |||
```conf | |||
input { | |||
http { | |||
#default host 0.0.0.0:8080 | |||
codec => json | |||
} | |||
} | |||
## Add your filters / logstash plugins configuration here | |||
filter { | |||
split { | |||
field => "events" | |||
target => "e" | |||
remove_field => "events" | |||
} | |||
} | |||
output { | |||
elasticsearch { | |||
hosts => "elasticsearch:9200" | |||
index=>"eshops-%{+xxxx.ww}" | |||
} | |||
} | |||
``` | |||
For doing this you can connect via ssh to the vm and edit the file using the vi editor for example. | |||
When the file will be edited, check there are Inbound Port Rules created for the logstash service. You can do it going to Networking Menu on your ELK Virtual Machine Resource in Azure. | |||
![](img/elk/azure-nsg-inboundportsConfig.png) | |||
The only thing that remains is to connect to your vm vía browser. And check the bitnami splash page is showing. | |||
![](img/elk/bitnami_splash.png) | |||
You can get the access password by going to your virtual machine in Azure and checking the boot diagnostics; there's a message that shows you your password.
When you have the user and password, you can access the Kibana tool and create the ```eshops-*``` index pattern documented at the beginning of this article, and then start discovering.
![](img/elk/) |
@ -0,0 +1,5 @@ | |||
# https://github.com/elastic/elasticsearch-docker
# Elasticsearch (OSS build) image used by the local ELK logging stack.
FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.0.0

# Add your elasticsearch plugins setup here
# Example: RUN elasticsearch-plugin install analysis-icu
@ -0,0 +1,16 @@ | |||
---
## Default Elasticsearch configuration from elasticsearch-docker.
## from https://github.com/elastic/elasticsearch-docker/blob/master/build/elasticsearch/elasticsearch.yml
#
cluster.name: "docker-cluster"
# Listen on all interfaces so other containers on the compose network can reach it.
network.host: 0.0.0.0

# minimum_master_nodes need to be explicitly set when bound on a public IP
# set to 1 to allow single node clusters
# Details: https://github.com/elastic/elasticsearch/pull/17288
discovery.zen.minimum_master_nodes: 1

## Use single node discovery in order to disable production mode and avoid bootstrap checks
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
#
discovery.type: single-node
@ -0,0 +1,5 @@ | |||
# https://github.com/elastic/kibana-docker
# Kibana (OSS build) image used by the local ELK logging stack.
FROM docker.elastic.co/kibana/kibana-oss:6.0.0

# Add your kibana plugins setup here
# Example: RUN kibana-plugin install <name|url>
@ -0,0 +1,7 @@ | |||
---
## Default Kibana configuration from kibana-docker.
## from https://github.com/elastic/kibana-docker/blob/master/build/kibana/config/kibana.yml
#
server.name: kibana
# "0" binds the server to all available interfaces.
server.host: "0"
# Elasticsearch container address on the compose network.
elasticsearch.url: http://elasticsearch:9200
@ -0,0 +1,6 @@ | |||
# https://github.com/elastic/logstash-docker
# Logstash (OSS build) image used by the local ELK logging stack.
FROM docker.elastic.co/logstash/logstash-oss:6.0.0

# Add your logstash plugins setup here
# Example: RUN logstash-plugin install logstash-filter-json
# HTTP input plugin: the pipeline receives log events over HTTP (port 8080).
RUN logstash-plugin install logstash-input-http
@ -0,0 +1,6 @@ | |||
---
## Default Logstash configuration from logstash-docker.
## from https://github.com/elastic/logstash-docker/blob/master/build/logstash/config/logstash-oss.yml
#
http.host: "0.0.0.0"
# Load pipeline definitions from the folder mounted by docker-compose.
path.config: /usr/share/logstash/pipeline
@ -0,0 +1,22 @@ | |||
# Pipeline: receive JSON log batches over HTTP, split them into individual
# events, and index them into Elasticsearch.
input {
  http {
    #default host 0.0.0.0:8080
    codec => json
  }
}

## Add your filters / logstash plugins configuration here
# Each incoming payload carries an "events" array; split it so every entry
# becomes its own document (moved into the "e" field).
filter {
  split {
    field => "events"
    target => "e"
    remove_field => "events"
  }
}

# One index per ISO week: "eshops-<year>.<week>".
output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    index=>"eshops-%{+xxxx.ww}"
  }
}
@ -0,0 +1,2 @@ | |||
# Exposes the RabbitMQ admin UI and SQL Server externally by applying the
# NodePort service definitions.
kubectl apply -f .\nodeports\rabbitmq-admin.yaml
kubectl apply -f .\nodeports\sql-services.yaml
@ -0,0 +1,37 @@ | |||
{{- /* Ingress for basket-api: routes $ingressPath on the cluster DNS to the basket service. */ -}}
{{- if .Values.ingress.enabled -}}
{{- $ingressPath := include "pathBase" . -}}
{{- $serviceName := .Values.app.svc.basket -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ template "basket-api.fullname" . }}
  labels:
    app: {{ template "basket-api.name" . }}
    chart: {{ template "basket-api.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
  - hosts:
    {{- /* fix: inside `range`, `.` is the tls entry, so `.Values.inf.k8s.dns`
           would fail at render time; use the root context `$` instead. */}}
    - {{ $.Values.inf.k8s.dns }}
    secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
  - host: {{ . }}
    http:
      paths:
      - path: {{ $ingressPath }}
        backend:
          serviceName: {{ $serviceName }}
          servicePort: http
  {{- end }}
{{- end }}
@ -0,0 +1,37 @@ | |||
{{- /* Ingress for catalog-api: routes $ingressPath on the cluster DNS to the catalog service. */ -}}
{{- if .Values.ingress.enabled -}}
{{- $ingressPath := include "pathBase" . -}}
{{- $serviceName := .Values.app.svc.catalog -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ template "catalog-api.fullname" . }}
  labels:
    app: {{ template "catalog-api.name" . }}
    chart: {{ template "catalog-api.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
  - hosts:
    {{- /* fix: inside `range`, `.` is the tls entry, so `.Values.inf.k8s.dns`
           would fail at render time; use the root context `$` instead. */}}
    - {{ $.Values.inf.k8s.dns }}
    secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
  - host: {{ . }}
    http:
      paths:
      - path: {{ $ingressPath }}
        backend:
          serviceName: {{ $serviceName }}
          servicePort: http
  {{- end }}
{{- end }}
@ -0,0 +1,116 @@ | |||
# Deploys eShopOnContainers to a Kubernetes cluster with Istio using Helm:
# downloads istioctl for Windows, optionally installs it system-wide, installs
# the Istio control plane, then installs the infrastructure and app charts.
Param(
    [parameter(Mandatory=$false)][string]$registry,
    [parameter(Mandatory=$false)][bool]$installIstioOnSystem=$false,
    [parameter(Mandatory=$false)][string]$dockerUser,
    [parameter(Mandatory=$false)][string]$dockerPassword,
    [parameter(Mandatory=$false)][string]$externalDns="aks",
    [parameter(Mandatory=$false)][string]$dnsname="eshoptestistio",
    [parameter(Mandatory=$false)][string]$appName="eshop",
    [parameter(Mandatory=$false)][bool]$deployInfrastructure=$true,
    [parameter(Mandatory=$false)][string]$kialiuser="admin",
    [parameter(Mandatory=$false)][string]$kialipasswrd="admin",
    [parameter(Mandatory=$false)][bool]$clean=$true,
    [parameter(Mandatory=$false)][string]$aksName="",
    [parameter(Mandatory=$false)][string]$aksRg="",
    [parameter(Mandatory=$false)][string]$imageTag="latest",
    [parameter(Mandatory=$false)][bool]$useLocalk8s=$false
)
$dns = $externalDns

# Install Istio
# Specify the Istio version that will be leveraged throughout these instructions
$ISTIO_VERSION="1.0.6"

# Windows: download and unpack the istio release (includes istioctl.exe).
$ProgressPreference = 'SilentlyContinue';
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest -URI "https://github.com/istio/istio/releases/download/$ISTIO_VERSION/istio-$ISTIO_VERSION-win.zip" -OutFile "istio-$ISTIO_VERSION.zip"
Remove-Item istio-$ISTIO_VERSION -Recurse -ErrorAction Ignore
Expand-Archive -Path "istio-$ISTIO_VERSION.zip" -DestinationPath .

# Optionally copy istioctl into Program Files and add it to the user PATH.
if($installIstioOnSystem -eq $true) {
    New-Item -ItemType Directory -Force -Path "C:\Program Files\Istio"
    mv ./istio-$ISTIO_VERSION/bin/istioctl.exe "C:\Program Files/Istio/"
    $PATH = [environment]::GetEnvironmentVariable("PATH", "User")
    [environment]::SetEnvironmentVariable("PATH", $PATH + "; C:\Program Files\Istio", "User")
}

# First, uninstall anything already deployed in the cluster.
if ($clean -eq $true) {
    Write-Host "Cleaning previous helm releases..." -ForegroundColor Green
    helm delete --purge $(helm ls -q)
    kubectl delete -f istio-$ISTIO_VERSION/install/kubernetes/helm/istio/templates/crds.yaml -n istio-system
    Write-Host "Previous releases deleted" -ForegroundColor Green
}

Write-Host "Generating Kiali Credentials" -ForegroundColor Green
# Create the credentials secret so Kiali starts without problems.
kubectl -n istio-system create secret generic kiali --from-literal=username=$kialiuser --from-literal=passphrase=$kialipasswrd

Write-Host "Deploying Istio in the cluster" -ForegroundColor Green
helm install istio-$ISTIO_VERSION/install/kubernetes/helm/istio --wait --name istio --namespace istio-system --set global.controlPlaneSecurityEnabled=true --set grafana.enabled=true --set tracing.enabled=true --set kiali.enabled=true

Write-Host "Setting Up Gateway"
kubectl delete gateway istio-autogenerated-k8s-ingress -n istio-system
kubectl apply -f ./istio/gateway.yml

if ($useLocalk8s -eq $true) {
    $dns="localhost"
    $externalDns="localhost"
}
else {
    Write-Host "Resolving DNS to Gateway public IP" -ForegroundColor Green
    # NOTE(review): parsing column 9 of plain `kubectl get service` output is
    # brittle; consider -o jsonpath='{.status.loadBalancer.ingress[0].ip}'.
    $ipaddress = $(kubectl get service istio-ingressgateway -n istio-system)[1] | %{ $_.Split(' ')[9];}
    $query = "[?ipAddress!=null]|[?contains([ipAddress], '$ipaddress')].[id]"
    $resid = az network public-ip list --query $query --output tsv
    $jsonresponse = az network public-ip update --ids $resid --dns-name $dnsname
    $externalDns = ($jsonresponse | ConvertFrom-Json).dnsSettings.fqdn
    Write-Host "$externalDns is pointing to Cluster public ip $ipaddress"
}

$useCustomRegistry=$false

if (-not [string]::IsNullOrEmpty($registry)) {
    $useCustomRegistry=$true
    if ([string]::IsNullOrEmpty($dockerUser) -or [string]::IsNullOrEmpty($dockerPassword)) {
        Write-Host "Error: Must use -dockerUser AND -dockerPassword if specifying custom registry" -ForegroundColor Red
        exit 1
    }
}

Write-Host "Begin eShopOnContainers installation using Helm" -ForegroundColor Green
$infras = ("sql-data", "nosql-data", "rabbitmq", "keystore-data", "basket-data")
$charts = ("eshop-common", "apigwmm", "apigwms", "apigwwm", "apigwws", "basket-api","catalog-api", "identity-api", "locations-api", "marketing-api", "mobileshoppingagg","ordering-api","ordering-backgroundtasks","ordering-signalrhub", "payment-api", "webmvc", "webshoppingagg", "webspa", "webstatus", "webhooks-api", "webhooks-web")

# Install the data/messaging infrastructure charts first.
if ($deployInfrastructure) {
    foreach ($infra in $infras) {
        Write-Host "Installing infrastructure: $infra" -ForegroundColor Green
        helm install --values app.yaml --values inf.yaml --set app.name=$appName --set inf.k8s.dns=$externalDns --name="$appName-$infra" $infra
    }
}

foreach ($chart in $charts) {
    Write-Host "Installing: $chart" -ForegroundColor Green
    if ($useCustomRegistry) {
        # NOTE(review): $ingressValuesFile is never assigned in this script, so
        # `--values $ingressValuesFile` expands empty — confirm whether an
        # ingress values file (e.g. ingress_values.yaml) was intended here.
        helm install --set inf.registry.server=$registry --set inf.registry.login=$dockerUser --set inf.registry.pwd=$dockerPassword --set inf.registry.secretName=eshop-docker-scret --values app.yaml --values inf.yaml --values $ingressValuesFile --set app.name=$appName --set inf.k8s.dns=$dns --set image.tag=$imageTag --set image.pullPolicy=Always --name="$appName-$chart" $chart
    }
    else {
        if ($chart -ne "eshop-common") { # eshop-common is ignored when no secret must be deployed
            helm install --values app.yaml --values inf.yaml --set app.name=$appName --set inf.k8s.dns=$externalDns --set image.tag=$imageTag --set image.pullPolicy=Always --name="$appName-$chart" $chart
        }
    }
}

Write-Host "helm charts installed." -ForegroundColor Green
Write-Host "Appling Virtual Services for routing." -ForegroundColor Green
kubectl apply -f ./istio/virtualservices.yml

# Clean up the downloaded istio release.
Remove-Item istio-$ISTIO_VERSION -Recurse -ErrorAction Ignore
Remove-Item istio-$ISTIO_VERSION.zip -Recurse -ErrorAction Ignore
@ -0,0 +1,232 @@ | |||
#!/usr/bin/env bash | |||
# http://redsymbol.net/articles/unofficial-bash-strict-mode | |||
set -euo pipefail | |||
# Print the help/usage text for this deploy script to stdout.
usage()
{
    cat <<END
deploy.sh: deploys the $app_name application to a Kubernetes cluster using Helm.
Parameters:
  --aks-name <AKS cluster name>
    The name of the AKS cluster. Required when the registry (using the -r parameter) is set to "aks".
  --aks-rg <AKS resource group>
    The resource group for the AKS cluster. Required when the registry (using the -r parameter) is set to "aks".
  -b | --build-solution
    Force a solution build before deployment (default: false).
  -d | --dns <dns or ip address> | --dns aks
    Specifies the external DNS/ IP address of the Kubernetes cluster.
    If 'aks' is set as value, the DNS value is retrieved from the AKS. --aks-name and --aks-rg are needed.
    When --use-local-k8s is specified the external DNS is automatically set to localhost.
  -h | --help
    Displays this help text and exits the script.
  --image-build
    Build images (default is to not build all images).
  --image-push
    Upload images to the container registry (default is not pushing to the custom registry)
  -n | --app-name <the name of the app>
    Specifies the name of the application (default: eshop).
  --namespace <namespace name>
    Specifies the namespace name to deploy the app. If it doesn't exists it will be created (default: eshop).
  -p | --docker-password <docker password>
    The Docker password used to logon to the custom registry, supplied using the -r parameter.
  -r | --registry <container registry>
    Specifies the container registry to use (required), e.g. myregistry.azurecr.io.
  --skip-clean
    Do not clean the Kubernetes cluster (default is to clean the cluster).
  --skip-infrastructure
    Do not deploy infrastructure resources (like sql-data, no-sql or redis).
    This is useful for production environments where infrastructure is hosted outside the Kubernetes cluster.
  -t | --tag <docker image tag>
    The tag used for the newly created docker images. Default: latest.
  -u | --docker-username <docker username>
    The Docker username used to logon to the custom registry, supplied using the -r parameter.
  --use-local-k8s
    Deploy to a locally installed Kubernetes (default: false).
It is assumed that the Kubernetes cluster has been granted access to the container registry.
If using AKS and ACR see link for more info:
https://docs.microsoft.com/en-us/azure/container-registry/container-registry-auth-aks
WARNING! THE SCRIPT WILL COMPLETELY DESTROY ALL DEPLOYMENTS AND SERVICES VISIBLE
FROM THE CURRENT CONFIGURATION CONTEXT AND NAMESPACE.
It is recommended that you check your selected namespace, 'eshop' by default, is already in use.
Every deployment and service done in the namespace will be deleted.
For more information see https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
END
}
# ---------------------------------------------------------------------------
# Defaults for every command line option (documented in usage above).
# ---------------------------------------------------------------------------
app_name='eshop'
aks_name=''
aks_rg=''
build_images=''
clean='yes'
build_solution=''
container_registry=''
docker_password=''
docker_username=''
dns=''
image_tag='latest'
push_images=''
skip_infrastructure=''
use_local_k8s=''
namespace='eshop'

# Parse the command line.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --aks-name )
      aks_name="$2"; shift 2;;
    --aks-rg )
      aks_rg="$2"; shift 2;;
    -b | --build-solution )
      build_solution='yes'; shift ;;
    -d | --dns )
      dns="$2"; shift 2;;
    -h | --help )
      usage; exit 1 ;;
    -n | --app-name )
      app_name="$2"; shift 2;;
    -p | --docker-password )
      docker_password="$2"; shift 2;;
    -r | --registry )
      container_registry="$2"; shift 2;;
    --skip-clean )
      clean=''; shift ;;
    --image-build )
      build_images='yes'; shift ;;
    --image-push )
      push_images='yes'; shift ;;
    --skip-infrastructure )
      skip_infrastructure='yes'; shift ;;
    -t | --tag )
      image_tag="$2"; shift 2;;
    -u | --docker-username )
      docker_username="$2"; shift 2;;
    --use-local-k8s )
      use_local_k8s='yes'; shift ;;
    --namespace )
      namespace="$2"; shift 2;;
    *)
      echo "Unknown option $1"
      usage; exit 2 ;;
  esac
done

if [[ $build_solution ]]; then
  echo "#################### Building $app_name solution ####################"
  dotnet publish -o obj/Docker/publish ../../eShopOnContainers-ServicesAndWebApps.sln
fi

# TAG is consumed by docker-compose.yml to tag the built images.
export TAG=$image_tag

if [[ $build_images ]]; then
  echo "#################### Building the $app_name Docker images ####################"
  docker-compose -p ../.. -f ../../docker-compose.yml build

  # Remove temporary images.
  # fix: a bare `docker rmi` (no dangling images) fails and would abort the
  # whole script under `set -e`; only call it when there is something to remove.
  dangling_images=$(docker images -qf "dangling=true")
  if [[ -n $dangling_images ]]; then
    docker rmi $dangling_images
  fi
fi

use_custom_registry=''

if [[ -n $container_registry ]]; then
  echo "################ Log into custom registry $container_registry ##################"
  use_custom_registry='yes'
  if [[ -z $docker_username ]] || [[ -z $docker_password ]]; then
    echo "Error: Must use -u (--docker-username) AND -p (--docker-password) if specifying custom registry"
    exit 1
  fi
  docker login -u $docker_username -p $docker_password $container_registry
fi

if [[ $push_images ]]; then
  echo "#################### Pushing images to the container registry ####################"
  services=(basket.api catalog.api identity.api ordering.api marketing.api payment.api locations.api webmvc webspa webstatus)

  # fix: the original probed "eshop/$service" BEFORE the loop, where $service
  # was still unset — an "unbound variable" crash under `set -u`. Probe the
  # first service instead to detect images tagged with the linux- prefix.
  if [[ -z "$(docker image ls -q --filter=reference="eshop/${services[0]}:$image_tag")" ]]; then
    image_tag=linux-$image_tag
  fi

  for service in "${services[@]}"
  do
    echo "Pushing image for service $service..."
    docker tag "eshop/$service:$image_tag" "$container_registry/$service:$image_tag"
    docker push "$container_registry/$service:$image_tag"
  done
fi

ingress_values_file="ingress_values.yaml"

if [[ $use_local_k8s ]]; then
  ingress_values_file="ingress_values_dockerk8s.yaml"
  dns="localhost"
fi

# With --dns aks, resolve the HTTP application routing zone of the AKS cluster.
if [[ $dns == "aks" ]]; then
  echo "#################### Begin AKS discovery based on the --dns aks setting. ####################"
  if [[ -z $aks_name ]] || [[ -z $aks_rg ]]; then
    # fix: the message referred to nonexistent flags (-dns/-aksName/-aksRg);
    # use the real option names.
    echo "Error: When using --dns aks, MUST set --aks-name and --aks-rg too."
    echo ''
    usage
    exit 1
  fi
  echo "Getting AKS cluster $aks_name AKS (in resource group $aks_rg)"
  # JMESPath queries are case sensitive and httpapplicationrouting can be lowercase sometimes
  jmespath_dnsqueries=(\
    addonProfiles.httpApplicationRouting.config.HTTPApplicationRoutingZoneName \
    addonProfiles.httpapplicationrouting.config.HTTPApplicationRoutingZoneName \
  )
  for q in "${jmespath_dnsqueries[@]}"
  do
    dns="$(az aks show -n $aks_name -g $aks_rg --query $q -o tsv)"
    if [[ -n $dns ]]; then break; fi
  done
  if [[ -z $dns ]]; then
    echo "Error: when getting DNS of AKS $aks_name (in resource group $aks_rg). Please ensure AKS has httpRouting enabled AND Azure CLI is logged in and is of version 2.0.37 or higher."
    exit 1
  fi
  echo "DNS base found is $dns. Will use $aks_name.$dns for the app!"
  dns="$aks_name.$dns"
fi

# Initialization & check commands
if [[ -z $dns ]]; then
  echo "No DNS specified. Ingress resources will be bound to public IP."
fi

if [[ $clean ]]; then
  echo "Cleaning previous helm releases..."
  if [[ -z $(helm ls -q --namespace $namespace) ]]; then
    echo "No previous releases found"
  else
    helm delete --purge $(helm ls -q --namespace $namespace)
    echo "Previous releases deleted"
    # Give the cluster a moment to tear everything down before reinstalling.
    waitsecs=10; while [ $waitsecs -gt 0 ]; do echo -ne "$waitsecs\033[0K\r"; sleep 1; : $((waitsecs--)); done
  fi
fi

echo "#################### Begin $app_name installation using Helm ####################"
infras=(sql-data nosql-data rabbitmq keystore-data basket-data)
charts=(eshop-common apigwmm apigwms apigwwm apigwws basket-api catalog-api identity-api locations-api marketing-api mobileshoppingagg ordering-api ordering-backgroundtasks ordering-signalrhub payment-api webmvc webshoppingagg webspa webstatus webhooks-api webhooks-web)

# fix: `[[ !$skip_infrastructure ]]` tested the literal string "!<value>",
# which is always non-empty (always true), so --skip-infrastructure was
# silently ignored; use -z to honor the flag.
if [[ -z $skip_infrastructure ]]; then
  for infra in "${infras[@]}"
  do
    echo "Installing infrastructure: $infra"
    helm install --namespace $namespace --set "ingress.hosts={$dns}" --values app.yaml --values inf.yaml --values $ingress_values_file --set app.name=$app_name --set inf.k8s.dns=$dns --name="$app_name-$infra" $infra
  done
fi

for chart in "${charts[@]}"
do
  echo "Installing: $chart"
  if [[ $use_custom_registry ]]; then
    helm install --namespace $namespace --set "ingress.hosts={$dns}" --set inf.registry.server=$container_registry --set inf.registry.login=$docker_username --set inf.registry.pwd=$docker_password --set inf.registry.secretName=eshop-docker-scret --values app.yaml --values inf.yaml --values $ingress_values_file --set app.name=$app_name --set inf.k8s.dns=$dns --set image.tag=$image_tag --set image.pullPolicy=Always --name="$app_name-$chart" $chart
  elif [[ $chart != "eshop-common" ]]; then # eshop-common is ignored when no secret must be deployed
    helm install --namespace $namespace --set "ingress.hosts={$dns}" --values app.yaml --values inf.yaml --values $ingress_values_file --set app.name=$app_name --set inf.k8s.dns=$dns --set image.tag=$image_tag --set image.pullPolicy=Always --name="$app_name-$chart" $chart
  fi
done

echo "FINISHED: Helm charts installed."
@ -0,0 +1,325 @@ | |||
# Using Helm Charts to deploy eShopOnContainers to AKS with ISTIO | |||
It is possible to deploy eShopOnContainers on an AKS cluster using [Helm](https://helm.sh/) instead of custom scripts (which will be deprecated soon). | |||
## Create Kubernetes cluster in AKS | |||
You can create the AKS cluster by using two ways: | |||
- A. Use Azure CLI: Follow a procedure using [Azure CLI like here](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough), but make sure you **enable RBAC** with `--enable-rbac` in `az aks create` command. | |||
- B. Use Azure's portal | |||
The following steps are using the Azure portal to create the AKS cluster: | |||
- Start the process by providing the general data, like in the following screenshot: | |||
![image](https://user-images.githubusercontent.com/1712635/45787360-c59ecd80-bc29-11e8-9565-c989ad6ad57b.png) | |||
- Then, very important, in the next step, enable RBAC: | |||
![image](https://user-images.githubusercontent.com/1712635/45780917-8bc2cc80-bc13-11e8-87ac-2942b3c7496d.png) | |||
You can use **basic network** settings since for a test you don't need integration into any existing VNET. | |||
![image](https://user-images.githubusercontent.com/1712635/45780991-b745b700-bc13-11e8-926b-afac57229d0a.png) | |||
- You can also enable monitoring: | |||
![image](https://user-images.githubusercontent.com/1712635/45781148-1277a980-bc14-11e8-8614-f7a239731bec.png) | |||
- Finally, create the cluster. It'll take a few minutes for it to be ready. | |||
### Configure RBAC security for K8s dashboard service-account | |||
In order NOT to get errors in the Kubernetes dashboard, you'll need to set the following service-account steps. | |||
Here you can see the errors you might see: | |||
![image](https://user-images.githubusercontent.com/1712635/45784384-5622e100-bc1d-11e8-8d33-e22fd955150a.png) | |||
Now, just run the Azure CLI command to browse the Kubernetes Dashboard: | |||
`az aks browse --resource-group pro-eshop-aks-helm-linux-resgrp --name pro-eshop-aks-helm-linux` | |||
![image](https://user-images.githubusercontent.com/1712635/45786406-2d9ee500-bc25-11e8-83e9-bdfc302e80f1.png) | |||
## Additional pre-requisites | |||
In addition to having an AKS cluster created in Azure and having kubectl and Azure CLI installed in your local machine and configured to use your Azure subscription, you also need the following pre-requisites: | |||
### Install Helm | |||
You need to have helm installed on your machine, and Tiller must be installed on the AKS. Follow these instructions on how to ['Install applications with Helm in Azure Kubernetes Service (AKS)'](https://docs.microsoft.com/en-us/azure/aks/kubernetes-helm) to setup Helm and Tiller for AKS. | |||
**Note**: If your AKS cluster is not RBAC-enabled (default option in portal) you may receive the following error when running a helm command: | |||
``` | |||
Error: Get http://localhost:8080/api/v1/namespaces/kube-system/configmaps?labelSelector=OWNER%!D(MISSING)TILLER: dial tcp [::1]:8080: connect: connection refused | |||
``` | |||
If so, type: | |||
``` | |||
kubectl --namespace=kube-system edit deployment/tiller-deploy | |||
``` | |||
Your default text editor will popup with the YAML definition of the tiller deploy. Search for: | |||
``` | |||
automountServiceAccountToken: false | |||
``` | |||
And change it to: | |||
``` | |||
automountServiceAccountToken: true | |||
``` | |||
Save the file and close the editor. This should reapply the deployment in the cluster. Now Helm commands should work. | |||
## Install eShopOnContainers with Istio using Helm | |||
All steps need to be performed on `/k8s/helm` folder. The easiest way is to use the `deploy-all-istio.ps1` script from a Powershell window: | |||
``` | |||
.\deploy-all-istio.ps1 -dnsname eshoptestistio -externalDns aks -aksName eshoptest -aksRg eshoptest -imageTag dev | |||
``` | |||
This will install all the [eShopOnContainers public images](https://hub.docker.com/u/eshop/) with tag `dev` on the AKS named `eshoptest` in the resource group `eshoptest` and with the dns url: http://**eshoptestistio**.westus.cloudapp.azure.com/ . By default all infrastructure (sql, mongo, rabbit and redis) is installed also in the cluster. | |||
Once the script is run, you should see following output when using `kubectl get deployment`: | |||
``` | |||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE | |||
eshop-apigwmm 1 1 1 1 4d | |||
eshop-apigwms 1 1 1 1 4d | |||
eshop-apigwwm 1 1 1 1 4d | |||
eshop-apigwws 1 1 1 1 4d | |||
eshop-basket-api 1 1 1 1 4d | |||
eshop-basket-data 1 1 1 1 4d | |||
eshop-catalog-api 1 1 1 1 4d | |||
eshop-identity-api 1 1 1 1 4d | |||
eshop-keystore-data 1 1 1 1 4d | |||
eshop-locations-api 1 1 1 1 4d | |||
eshop-marketing-api 1 1 1 1 4d | |||
eshop-mobileshoppingagg 1 1 1 1 4d | |||
eshop-nosql-data 1 1 1 1 4d | |||
eshop-ordering-api 1 1 1 1 4d | |||
eshop-ordering-backgroundtasks 1 1 1 1 4d | |||
eshop-ordering-signalrhub 1 1 1 1 4d | |||
eshop-payment-api 1 1 1 1 4d | |||
eshop-rabbitmq 1 1 1 1 4d | |||
eshop-sql-data 1 1 1 1 4d | |||
eshop-webmvc 1 1 1 1 4d | |||
eshop-webshoppingagg 1 1 1 1 4d | |||
eshop-webspa 1 1 1 1 4d | |||
eshop-webstatus 1 1 1 1 4d | |||
``` | |||
Every public service is exposed through the istio ingress gateway. | |||
You can see the ingress gateway's public IP by running `kubectl get services -n istio-system` | |||
``` | |||
grafana ClusterIP 10.0.204.87 <none> 3000/TCP 1h | |||
istio-citadel ClusterIP 10.0.23.86 <none> 8060/TCP,9093/TCP 1h | |||
istio-egressgateway ClusterIP 10.0.136.169 <none> 80/TCP,443/TCP 1h | |||
istio-galley ClusterIP 10.0.113.51 <none> 443/TCP,9093/TCP 1h | |||
istio-ingressgateway LoadBalancer 10.0.76.80 40.118.189.161 80:31380/TCP,443:31390/TCP,31400:31400/TCP,15011:31276/TCP,8060:30519/TCP,853:31698/TCP,15030:31453/TCP,15031:32362/TCP 1h | |||
istio-pilot ClusterIP 10.0.164.253 <none> 15010/TCP,15011/TCP,8080/TCP,9093/TCP 1h | |||
istio-policy ClusterIP 10.0.170.49 <none> 9091/TCP,15004/TCP,9093/TCP 1h | |||
istio-sidecar-injector ClusterIP 10.0.251.12 <none> 443/TCP 1h | |||
istio-telemetry ClusterIP 10.0.195.112 <none> 9091/TCP,15004/TCP,9093/TCP,42422/TCP 1h | |||
jaeger-agent ClusterIP None <none> 5775/UDP,6831/UDP,6832/UDP 1h | |||
jaeger-collector ClusterIP 10.0.123.98 <none> 14267/TCP,14268/TCP 1h | |||
jaeger-query ClusterIP 10.0.244.146 <none> 16686/TCP 1h | |||
kiali ClusterIP 10.0.182.12 <none> 20001/TCP 1h | |||
prometheus ClusterIP 10.0.136.223 <none> 9090/TCP 1h | |||
tracing ClusterIP 10.0.57.236 <none> 80/TCP 1h | |||
zipkin ClusterIP 10.0.30.57 <none> 9411/TCP 1h | |||
``` | |||
You can view the MVC client at http://[dns]/ | |||
## Customizing the deployment | |||
### Using your own images | |||
To use your own images instead of the public ones, you have to pass following additional parameters to the `deploy-all-istio.ps1` script: | |||
* `registry`: Login server for the Docker registry | |||
* `dockerUser`: User login for the Docker registry | |||
* `dockerPassword`: User password for the Docker registry | |||
This will deploy a secret on the cluster to connect to the specified server, and all image names deployed will be prepended with `registry/` value. | |||
### Not deploying infrastructure containers | |||
If you want to use external resources, use `-deployInfrastructure $false` to not deploy infrastructure containers. However **you still have to manually update the scripts to provide your own configuration** (see next section). | |||
### Providing your own configuration | |||
The file `inf.yaml` contains the description of the infrastructure used. The file is documented, so take a look at it to understand all of its entries. If using external resources you need to edit this file according to your needs. You'll need to edit: | |||
* `inf.sql.host` with the host name of the SQL Server | |||
* `inf.sql.common` entries to provide your SQL user, password. `Pid` is not used when using external resources (it is used to set specific product id for the SQL Server container). | |||
* `inf.sql.catalog`, `inf.sql.ordering`, `inf.sql.identity`: To provide the database names for catalog, ordering and identity services | |||
* `mongo.host`: With the host name of the Mongo DB | |||
* `mongo.locations`, `mongo.marketing` with the database names for locations and marketing services | |||
* `redis.basket.constr` with the connection string to Redis for Basket Service. Note that `redis.basket.svc` is not used when using external services | |||
* `redis.keystore.constr` with the connection string to Redis for Keystore Service. Note that `redis.keystore.svc` is not used when using external services | |||
* `eventbus.constr` with the connection string to Azure Service Bus and `eventbus.useAzure` to `true` to use Azure service bus. Note that `eventbus.svc` is not used when using external services | |||
### Using Azure storage for Catalog Photos | |||
Using Azure storage for catalog (and marketing) photos is not directly supported, but you can accomplish it by editing the file `k8s/helm/catalog-api/templates/configmap.yaml`. Search for lines: | |||
``` | |||
catalog__PicBaseUrl: http://{{ $webshoppingapigw }}/api/v1/c/catalog/items/[0]/pic/ | |||
``` | |||
And replace it with: | |||
``` | |||
catalog__PicBaseUrl: http://<url-of-the-storage>/ | |||
``` | |||
In the same way, to use Azure storage for the marketing service, you have to edit the file `k8s/helm/marketing-api/templates/configmap.yaml` and replace the line: | |||
``` | |||
marketing__PicBaseUrl: http://{{ $webshoppingapigw }}/api/v1/c/catalog/items/[0]/pic/ | |||
``` | |||
by: | |||
``` | |||
marketing__PicBaseUrl: http://<url-of-the-storage>/ | |||
``` | |||
# Using Helm Charts to deploy eShopOnContainers to a local Kubernetes in Windows with 'Docker for Windows' | |||
## Additional pre-requisites | |||
In addition to having Docker for Windows/Mac with Kubernetes enabled and having kubectl installed, you also need the following pre-requisites: | |||
### Install Helm | |||
You need to have helm installed on your machine, and Tiller must be installed on the local Docker Kubernetes cluster. Once you have [Helm downloaded](https://helm.sh/) and installed on your machine you must: | |||
1. Create the tiller service account, by running `kubectl apply -f helm-rbac.yaml` from `/k8s` folder | |||
2. Install tiller and configure it to use the tiller service account by typing `helm init --service-account tiller` | |||
## Install eShopOnContainers with Istio using Helm | |||
All steps need to be performed on `/k8s/helm` folder. The easiest way is to use the `deploy-all-istio.ps1` script from a Powershell window: | |||
``` | |||
.\deploy-all-istio.ps1 -imageTag dev -useLocalk8s $true | |||
``` | |||
The parameter `useLocalk8s` to $true, forces the script to use `localhost` as the DNS for all Helm charts. | |||
This will install all the [eShopOnContainers public images](https://hub.docker.com/u/eshop/) with tag `dev` on the Docker local Kubernetes cluster. By default all infrastructure (sql, mongo, rabbit and redis) is installed also in the cluster. | |||
Once the script is run, you should see following output when using `kubectl get deployment`: | |||
``` | |||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE | |||
eshop-apigwmm 1 1 1 1 2h | |||
eshop-apigwms 1 1 1 1 2h | |||
eshop-apigwwm 1 1 1 1 2h | |||
eshop-apigwws 1 1 1 1 2h | |||
eshop-basket-api 1 1 1 1 2h | |||
eshop-basket-data 1 1 1 1 2h | |||
eshop-catalog-api 1 1 1 1 2h | |||
eshop-identity-api 1 1 1 1 2h | |||
eshop-keystore-data 1 1 1 1 2h | |||
eshop-locations-api 1 1 1 1 2h | |||
eshop-marketing-api 1 1 1 1 2h | |||
eshop-mobileshoppingagg 1 1 1 1 2h | |||
eshop-nosql-data 1 1 1 1 2h | |||
eshop-ordering-api 1 1 1 1 2h | |||
eshop-ordering-backgroundtasks 1 1 1 1 2h | |||
eshop-ordering-signalrhub 1 1 1 1 2h | |||
eshop-payment-api 1 1 1 1 2h | |||
eshop-rabbitmq 1 1 1 1 2h | |||
eshop-sql-data 1 1 1 1 2h | |||
eshop-webmvc 1 1 1 1 2h | |||
eshop-webshoppingagg 1 1 1 1 2h | |||
eshop-webspa 1 1 1 1 2h | |||
eshop-webstatus 1 1 1 1 2h | |||
``` | |||
Note that istio ingress gateway is bound to DNS localhost and the host is also "localhost". So, you can access the webspa by typing `http://localhost` and the MVC by typing `http://localhost/` | |||
As this is the Docker local K8s cluster, you can also see the containers running on your machine. If you type `docker ps` you'll see all of them: | |||
``` | |||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES | |||
fec1e3499416 a3f21ec4bd11 "/entrypoint.sh /ngi…" 9 minutes ago Up 9 minutes k8s_nginx-ingress-controller_nginx-ingress-controller-f88c75bc6-5xs2n_ingress-nginx_f1cc7094-e68f-11e8-b4b6-00155d016146_0 | |||
76485867f032 eshop/payment.api "dotnet Payment.API.…" 2 hours ago Up 2 hours k8s_payment-api_eshop-payment-api-75d5f9bdf6-6zx2v_default_4a3cdab4-e67f-11e8-b4b6-00155d016146_1 | |||
c2c4640ed610 eshop/marketing.api "dotnet Marketing.AP…" 2 hours ago Up 2 hours k8s_marketing-api_eshop-marketing-api-6b8c5989fd-jpxqv_default_45780626-e67f-11e8-b4b6-00155d016146_1 | |||
85301d538574 eshop/ordering.signalrhub "dotnet Ordering.Sig…" 2 hours ago Up 2 hours k8s_ordering-signalrhub_eshop-ordering-signalrhub-58cf5ff6-cnlm8_default_4932c344-e67f-11e8-b4b6-00155d016146_1 | |||
7a408a98000e eshop/ordering.backgroundtasks "dotnet Ordering.Bac…" 2 hours ago Up 2 hours k8s_ordering-backgroundtasks_eshop-ordering-backgroundtasks-cc8f6d4d8-ztfk7_default_47f9cf10-e67f-11e8-b4b6-00155d016146_1 | |||
12c64b3a13e0 eshop/basket.api "dotnet Basket.API.d…" 2 hours ago Up 2 hours k8s_basket-api_eshop-basket-api-658546684d-6hlvd_default_4262d022-e67f-11e8-b4b6-00155d016146_1 | |||
133fccfeeff3 eshop/webstatus "dotnet WebStatus.dll" 2 hours ago Up 2 hours k8s_webstatus_eshop-webstatus-7f46479dc4-bqnq7_default_4dc13eb2-e67f-11e8-b4b6-00155d016146_0 | |||
00c6e4c52135 eshop/webspa "dotnet WebSPA.dll" 2 hours ago Up 2 hours k8s_webspa_eshop-webspa-64cb8df9cb-dcbwg_default_4cd47376-e67f-11e8-b4b6-00155d016146_0 | |||
d4507f1f6b1a eshop/webshoppingagg "dotnet Web.Shopping…" 2 hours ago Up 2 hours k8s_webshoppingagg_eshop-webshoppingagg-cc94fc86-sxd2v_default_4be6cdb9-e67f-11e8-b4b6-00155d016146_0 | |||
9178e26703da eshop/webmvc "dotnet WebMVC.dll" 2 hours ago Up 2 hours k8s_webmvc_eshop-webmvc-985779684-4br5z_default_4addd4d6-e67f-11e8-b4b6-00155d016146_0 | |||
1088c281c710 eshop/ordering.api "dotnet Ordering.API…" 2 hours ago Up 2 hours k8s_ordering-api_eshop-ordering-api-fb8c548cb-k68x9_default_4740958a-e67f-11e8-b4b6-00155d016146_0 | |||
12424156d5c9 eshop/mobileshoppingagg "dotnet Mobile.Shopp…" 2 hours ago Up 2 hours k8s_mobileshoppingagg_eshop-mobileshoppingagg-b54645d7b-rlrgh_default_46c00017-e67f-11e8-b4b6-00155d016146_0 | |||
65463ffd437d eshop/locations.api "dotnet Locations.AP…" 2 hours ago Up 2 hours k8s_locations-api_eshop-locations-api-577fc94696-dfhq8_default_44929c4b-e67f-11e8-b4b6-00155d016146_0 | |||
5b3431873763 eshop/identity.api "dotnet Identity.API…" 2 hours ago Up 2 hours k8s_identity-api_eshop-identity-api-85d9b79f4-s5ks7_default_43d6eb7c-e67f-11e8-b4b6-00155d016146_0 | |||
7c8e77252459 eshop/catalog.api "dotnet Catalog.API.…" 2 hours ago Up 2 hours k8s_catalog-api_eshop-catalog-api-59fd444fb-ztvhz_default_4356705a-e67f-11e8-b4b6-00155d016146_0 | |||
94d95d0d3653 eshop/ocelotapigw "dotnet OcelotApiGw.…" 2 hours ago Up 2 hours k8s_apigwws_eshop-apigwws-65474b979d-n99jw_default_41395473-e67f-11e8-b4b6-00155d016146_0 | |||
bc4bbce71d5f eshop/ocelotapigw "dotnet OcelotApiGw.…" 2 hours ago Up 2 hours k8s_apigwwm_eshop-apigwwm-857c549dd8-8w5gv_default_4098d770-e67f-11e8-b4b6-00155d016146_0 | |||
840aabcceaa9 eshop/ocelotapigw "dotnet OcelotApiGw.…" 2 hours ago Up 2 hours k8s_apigwms_eshop-apigwms-5b94dfb54b-dnmr9_default_401fc611-e67f-11e8-b4b6-00155d016146_0 | |||
aabed7646f5b eshop/ocelotapigw "dotnet OcelotApiGw.…" 2 hours ago Up 2 hours k8s_apigwmm_eshop-apigwmm-85f96cbdb4-dhfwr_default_3ed7967a-e67f-11e8-b4b6-00155d016146_0 | |||
49c5700def5a f06a5773f01e "docker-entrypoint.s…" 2 hours ago Up 2 hours k8s_basket-data_eshop-basket-data-66fbc788cc-csnlw_default_3e0c45fe-e67f-11e8-b4b6-00155d016146_0 | |||
a5db4c521807 f06a5773f01e "docker-entrypoint.s…" 2 hours ago Up 2 hours k8s_keystore-data_eshop-keystore-data-5c9c85cb99-8k56s_default_3ce1a273-e67f-11e8-b4b6-00155d016146_0 | |||
aae88fd2d810 d69a5113ceae "docker-entrypoint.s…" 2 hours ago Up 2 hours k8s_rabbitmq_eshop-rabbitmq-6b68647bc4-gr565_default_3c37ee6a-e67f-11e8-b4b6-00155d016146_0 | |||
65d49ca9589d bbed8d0e01c1 "docker-entrypoint.s…" 2 hours ago Up 2 hours k8s_nosql-data_eshop-nosql-data-579c9d89f8-mtt95_default_3b9c1f89-e67f-11e8-b4b6-00155d016146_0 | |||
090e0dde2ec4 bbe2822dfe38 "/opt/mssql/bin/sqls…" 2 hours ago Up 2 hours k8s_sql-data_eshop-sql-data-5c4fdcccf4-bscdb_default_3afd29b8-e67f-11e8-b4b6-00155d016146_0 | |||
``` | |||
## Known issues | |||
Login from the webmvc results in following error: HttpRequestException: Response status code does not indicate success: 404 (Not Found). | |||
The reason is because MVC needs to access the Identity Server from both outside the container (browser) and inside the container (C# code). Thus, the configuration uses always the *external url* of the Identity Server, which in this case is just `http://localhost/identity-api`. But this external url is incorrect when used from C# code, and the web mvc can't access the identity api. This is the only case when this issue happens (and is the reason why we use 10.0.75.1 for local address in web mvc in local development mode) | |||
Solving this requires some manual steps: | |||
Update the configmap of Web MVC by typing (**line breaks are mandatory**) and your cluster dns name has to be the same of your environment: | |||
``` | |||
kubectl patch cm cfg-eshop-webmvc --type strategic --patch @' | |||
data: | |||
urls__IdentityUrl: http://**eshoptest**.westus.cloudapp.azure.com/identity | |||
urls__mvc: http://**eshoptest**.westus.cloudapp.azure.com/webmvc | |||
'@ | |||
``` | |||
Update the configmap of Identity API by typing (**line breaks are mandatory**): | |||
``` | |||
kubectl patch cm cfg-eshop-identity-api --type strategic --patch @' | |||
data: | |||
mvc_e: http://**eshoptest**.westus.cloudapp.azure.com/webmvc | |||
'@ | |||
``` | |||
Restart the SQL Server pod to ensure the database is recreated again: | |||
``` | |||
kubectl delete pod --selector app=sql-data | |||
``` | |||
Wait until SQL Server pod is ready to accept connections and then restart all other pods: | |||
``` | |||
kubectl delete pod --selector="app!=sql-data" | |||
``` | |||
**Note:** Pods are deleted to ensure the databases are recreated again, as identity api stores its client names and urls in the database. | |||
Now, you can access the MVC app using: `http://**eshoptest**.westus.cloudapp.azure.com/`. | |||
@ -0,0 +1,15 @@ | |||
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  # Gateway that the eShopOnContainers VirtualServices bind to by name.
  name: istio-ingressgateway
  #namespace: istio-system
spec:
  selector:
    istio: ingressgateway # use Istio default gateway implementation
  servers:
  # Accept plain HTTP traffic on port 80 for any host.
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "*"
@ -0,0 +1,59 @@ | |||
# VirtualServices routing external traffic from the Istio ingress gateway
# to the eShopOnContainers services, matched by URI prefix.
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: webmvcvs
  namespace: default
spec:
  hosts:
  - "*"
  gateways:
  - istio-ingressgateway
  http:
  # Catch-all route: everything not matched by a more specific
  # VirtualService prefix goes to the MVC web front-end.
  - match:
    - uri:
        prefix: /
    route:
    - destination:
        port:
          number: 80
        host: webmvc
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: webshoppingapigwvs
  namespace: default
spec:
  hosts:
  - "*"
  gateways:
  - istio-ingressgateway
  http:
  # Route the web shopping API gateway under its own path prefix.
  - match:
    - uri:
        prefix: /webshoppingapigw
    route:
    - destination:
        port:
          number: 80
        host: webshoppingapigw
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: identityvs
  namespace: default
spec:
  hosts:
  - "*"
  gateways:
  - istio-ingressgateway
  http:
  # Route the identity service (login/token endpoints) under /identity.
  - match:
    - uri:
        prefix: /identity
    route:
    - destination:
        port:
          number: 80
        host: identity
@ -0,0 +1,36 @@ | |||
{{- if .Values.ingress.enabled -}}
{{- /*
Ingress for the locations-api service: exposes the chart's path base on each
configured host and forwards to the service's "http" port. Rendered only when
ingress is enabled in values.
*/ -}}
{{- $ingressPath := include "pathBase" . -}}
{{- $serviceName := .Values.app.svc.locations }}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ template "locations-api.fullname" . }}
  labels:
    app: {{ template "locations-api.name" . }}
    chart: {{ template "locations-api.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
  - hosts:
    {{- /* BUGFIX: inside this range the dot is the tls entry, not the chart
           root, so `.Values.inf.k8s.dns` fails at render time; the root
           context must be reached via `$`. */}}
    - {{ $.Values.inf.k8s.dns }}
    secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
  - host: {{ . }}
    http:
      paths:
      - path: {{ $ingressPath }}
        backend:
          serviceName: {{ $serviceName }}
          servicePort: http
  {{- end }}
{{- end }}
@ -0,0 +1,36 @@ | |||
{{- if .Values.ingress.enabled -}}
{{- /*
Ingress for the marketing-api service: exposes the chart's path base on each
configured host and forwards to the service's "http" port. Rendered only when
ingress is enabled in values.
*/ -}}
{{- $ingressPath := include "pathBase" . -}}
{{- $serviceName := .Values.app.svc.marketing }}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ template "marketing-api.fullname" . }}
  labels:
    app: {{ template "marketing-api.name" . }}
    chart: {{ template "marketing-api.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
  - hosts:
    {{- /* BUGFIX: inside this range the dot is the tls entry, not the chart
           root, so `.Values.inf.k8s.dns` fails at render time; the root
           context must be reached via `$`. */}}
    - {{ $.Values.inf.k8s.dns }}
    secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
  - host: {{ . }}
    http:
      paths:
      - path: {{ $ingressPath }}
        backend:
          serviceName: {{ $serviceName }}
          servicePort: http
  {{- end }}
{{- end }}
@ -0,0 +1,36 @@ | |||
{{- if .Values.ingress.enabled -}}
{{- /*
Ingress for the mobile shopping aggregator: exposes the chart's path base on
each configured host and forwards to the service's "http" port. Rendered only
when ingress is enabled in values.
*/ -}}
{{- $ingressPath := include "pathBase" . -}}
{{- $serviceName := .Values.app.svc.mobileshoppingagg }}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ template "mobileshoppingagg.fullname" . }}
  labels:
    app: {{ template "mobileshoppingagg.name" . }}
    chart: {{ template "mobileshoppingagg.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
  - hosts:
    {{- /* BUGFIX: inside this range the dot is the tls entry, not the chart
           root, so `.Values.inf.k8s.dns` fails at render time; the root
           context must be reached via `$`. */}}
    - {{ $.Values.inf.k8s.dns }}
    secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
  - host: {{ . }}
    http:
      paths:
      - path: {{ $ingressPath }}
        backend:
          serviceName: {{ $serviceName }}
          servicePort: http
  {{- end }}
{{- end }}