Commit 1b535dd5 authored by Tucker Gary Siegel

update

parent 77e0bd0a
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
apiVersion: v2
name: dawn-gdd
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to quote this value.
appVersion: "0.1.0"
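# For example (hypothetical next release): a new application build that also
# changes the templates would bump both fields, e.g. version: 0.1.1 together
# with appVersion: "0.2.0".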
{{/*
Expand the name of the chart.
*/}}
{{- define "dawn-gateway.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "dawn-gateway.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "dawn-gateway.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "dawn-gateway.labels" -}}
helm.sh/chart: {{ include "dawn-gateway.chart" . }}
{{ include "dawn-gateway.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "dawn-gateway.selectorLabels" -}}
app.kubernetes.io/name: {{ include "dawn-gateway.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "dawn-gateway.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "dawn-gateway.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
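{{/*
Illustration (hypothetical usage; note the helpers keep a "dawn-gateway" prefix
while the templates in this chart hard-code the name "dawn-gdd" instead).
With nameOverride unset, a release named "prod" renders the fullname as
"prod-dawn-gdd", while a release named "dawn-gdd" already contains the chart
name and renders as just "dawn-gdd". A template would consume it as:

metadata:
  name: {{ include "dawn-gateway.fullname" . }}
*/}}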
apiVersion: v1
kind: ConfigMap
metadata:
  name: dawn-gdd-conf
  namespace: {{ .Release.Namespace }}
data:
  conf: |
    app:
      name: {{ .Values.conf.app.name }}
      logLevel: {{ .Values.conf.app.logLevel }}
      logType: {{ .Values.conf.app.logType }}
      swagger: {{ .Values.conf.app.swagger }}
      auth: {{ .Values.conf.app.auth }}
      swagger-host-url: {{ .Values.conf.app.swaggerHostUrl }}
      api-version: {{ .Values.conf.app.apiVersion }}
    server:
      host: {{ .Values.conf.server.host }}
      port: {{ .Values.conf.server.port }}
      context-path: {{ .Values.conf.server.contextPath }}
    db:
      uri: {{ .Values.conf.db.uri }}
      database: {{ .Values.conf.db.database }}
apiVersion: apps/v1 # for k8s versions before 1.9.0 use apps/v1beta2 and before 1.8.0 use extensions/v1beta1
kind: Deployment
metadata:
  name: dawn-gdd
  namespace: {{ .Release.Namespace }}
  labels:
    version: {{ .Chart.AppVersion }}
spec:
  selector:
    matchLabels:
      service: dawn-gdd
      env: {{ .Release.Namespace }}
  replicas: 1
  template:
    metadata:
      labels:
        service: dawn-gdd
        env: {{ .Release.Namespace }}
        version: {{ .Chart.AppVersion }}
      annotations:
        # Checksum of the rendered ConfigMap forces a rolling restart whenever the config changes.
        checksum/config: {{ include (print $.Template.BasePath "/configMap.yaml") . | sha256sum }}
    spec:
      containers:
        - name: dawn-gdd
          image: docker.registry.dawn.int:5000/dawn/dawn-gdd:{{ .Chart.AppVersion }}
          resources:
            requests:
              cpu: {{ .Values.resources.requests.cpu }}
              memory: {{ .Values.resources.requests.memory }}
          volumeMounts:
            - name: conf
              mountPath: "/root/config/conf"
              subPath: conf
              readOnly: true
          args:
            - --config
            - conf
          ports:
            - containerPort: {{ .Values.port }}
      volumes:
        - name: conf
          configMap:
            name: dawn-gdd-conf
apiVersion: v1
kind: Service
metadata:
  name: dawn-gdd
  namespace: {{ .Release.Namespace }}
  labels:
    service: dawn-gdd
    env: {{ .Release.Namespace }}
spec:
  type: ClusterIP
  ports:
    - port: 80
      targetPort: {{ .Values.port }}
  selector:
    service: dawn-gdd
    env: {{ .Release.Namespace }}
port: 5000
resources:
  requests:
    cpu: 100m
    memory: 100Mi
# default for staging
conf:
  app:
    name: gdd-service
    logType: json
    logLevel: DEBUG
    swagger: true
    auth: false
    swaggerHostUrl: "localhost:5000"
    apiVersion: 1
  server:
    host: "localhost"
    port: 5000
    contextPath: "/api/weather"
  db:
    uri: "mongodb://deployment-internal-mongo.deployment-internal.svc.cluster.local:27017/"
    database: "weather-service"
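
# These values are staging defaults; other environments would typically
# override them at install time with an environment-specific values file
# (hypothetical example):
#
#   # values-prod.yaml
#   conf:
#     app:
#       logLevel: INFO
#       swagger: false
#
# applied with: helm install dawn-gdd dawn/dawn-gdd -f values-prod.yaml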
package services

import (
	"math"
	"sort"
	"time"

	"github.com/montanaflynn/stats"
	"github.com/tgs266/dawn-go-common/common"
	"gitlab.cs.umd.edu/dawn/go-backend/dawn-gdd/models"
	"gitlab.cs.umd.edu/dawn/go-backend/dawn-gdd/models/enums"
	"gitlab.cs.umd.edu/dawn/go-backend/dawn-gdd/persistence"
	"gitlab.cs.umd.edu/dawn/go-backend/dawn-gdd/persistence/entities"
	"gitlab.cs.umd.edu/dawn/go-backend/dawn-gdd/utils"
)
// GetStageYearData assembles non-accumulated daily GDD series for a location:
// the observed current-year data extended by CFS forecast members, alongside
// either the historical normals (comparison == -1) or a specific comparison year.
func GetStageYearData(ctx common.DawnCtx, request models.GddRequest, comparison int) models.StageData {
	product := enums.GetProductFromString(request.Product)
	gddData := persistence.CurrentGddFindFirstByYearAndLocation(ctx, request.BuildLocation())
	gdds := utils.CalculateGddValues(gddData.MinTemps, gddData.MaxTemps, product, false)
	request.Year = gddData.AnalogYear

	var gs []entities.Gdd
	norms := persistence.GetLastNormalsYearly(request.BuildLocation())
	if comparison == -1 {
		gs = norms
	} else {
		gs = []entities.Gdd{persistence.GddFindFirstByYearAndLocation(comparison, request.BuildLocation())}
	}

	var normalMeanNonAcc []float64
	comparisonRows := [][]float64{}
	for i := 0; i < len(gs[0].MinTemps); i++ {
		rowComp := []float64{}
		rowNormal := []float64{}
		for j := 0; j < len(gs); j++ {
			rowComp = append(rowComp, utils.CalculateSingleGdd(gs[j].MinTemps[i], gs[j].MaxTemps[i], product))
		}
		for j := 0; j < len(norms); j++ {
			rowNormal = append(rowNormal, utils.CalculateSingleGdd(norms[j].MinTemps[i], norms[j].MaxTemps[i], product))
		}
		comparisonRows = append(comparisonRows, rowComp)
		normMeanNoAccValue, _ := stats.Mean(rowNormal)
		normalMeanNonAcc = append(normalMeanNonAcc, normMeanNoAccValue)
	}

	allCfs := persistence.CfsFindByLocationMultiple(request.BuildLocation(), 4)
	// cfsMeans := persistence.CfsFindAllByLocation(request.BuildLocation())
	gddArr := [][]float64{}
	for _, c := range allCfs {
		// Copy the observed series so forecast rows don't share a backing array.
		row := append([]float64(nil), gdds...)
		cfsGddData := utils.CalculateGddValues(c.MinTemps, c.MaxTemps, product, false) // not accumulated
		// anomaly adjustment function
		// cfsGddData := utils.CalculateGddValuesCfsNormed(c.MinTemps, c.MaxTemps, product, cfsMeans.MinTemps, cfsMeans.MaxTemps, normalMeanNonAcc) // not accumulated
		row = append(row, cfsGddData...)
		// Trim or pad with the normal means so every row spans the full year.
		if len(row) > len(normalMeanNonAcc) {
			row = row[:len(normalMeanNonAcc)]
		} else {
			row = append(row, normalMeanNonAcc[len(row):]...)
		}
		gddArr = append(gddArr, row)
	}

	// none of this data is accumulated
	returnData := models.StageData{
		AllGdds:       gddArr,
		ComparisonAll: comparisonRows,
	}
	return returnData
}
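
// utils.CalculateSingleGdd is not shown in this commit; as a rough sketch, a
// standard "modified" growing-degree-day calculation caps the daily extremes
// at a base and ceiling before averaging. The 50/86 Fahrenheit bounds below
// are the conventional corn values and are an assumption here, not
// necessarily what the CORN product actually uses.
func singleGddSketch(minTemp, maxTemp float64) float64 {
	const base, ceiling = 50.0, 86.0
	// Clamp both extremes into [base, ceiling] (modified GDD method).
	if maxTemp > ceiling {
		maxTemp = ceiling
	}
	if maxTemp < base {
		maxTemp = base
	}
	if minTemp < base {
		minTemp = base
	}
	// Mean of the clamped extremes, measured above the base temperature.
	return (minTemp+maxTemp)/2 - base
}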
// CalculateStages estimates, for each growth stage, the distribution of days
// on which the GDD accumulated since planting reaches that stage's threshold.
func CalculateStages(ctx common.DawnCtx, request models.StageRequest) map[string]models.Bins {
	gddReq := models.GddRequest{
		Year:       request.PlantDate.Year(),
		Latitude:   request.Latitude,
		Longitude:  request.Longitude,
		Accumulate: false,
		Product:    "CORN",
	}
	fyData := GetStageYearData(ctx, gddReq, request.Comparison)

	start := request.PlantDate.YearDay()
	year := request.PlantDate.Year()
	// Gregorian leap-year rule: divisible by 4, except centuries not divisible by 400.
	if year%4 == 0 && year%100 != 0 || year%400 == 0 {
		start -= 1
	}

	state := map[string]models.StageStateInner{}
	stageMatches := models.BuildStageMatches(request.Mode, request.Value, start, fyData, request)
	accs := make([]float64, len(fyData.AllGdds))
	accs2 := make([]float64, len(fyData.ComparisonAll[0]))
	accNormal := 0.0
	for i := start; i < len(fyData.AllGdds[0]); i++ {
		for r, v := range fyData.AllGdds {
			accs[r] += v[i]
		}
		for j := 0; j < len(fyData.ComparisonAll[0]); j++ {
			accs2[j] += fyData.ComparisonAll[i][j]
		}
		normal, _ := stats.Mean(accs2)
		accNormal = normal
		for stage, stageVal := range stageMatches {
			// Distance between each series' accumulation and this stage's threshold.
			dists := make([]float64, len(fyData.AllGdds))
			for r, v := range accs {
				dists[r] = math.Abs(stageVal - v)
			}
			if val, ok := state[stage]; !ok {
				state[stage] = models.StageStateInner{
					Dists:          dists,
					Hists:          make([]int, len(fyData.AllGdds)),
					NormalMeanDist: 1000000,
					NormalMeanIdx:  0,
				}
			} else {
				normalMeanDist := math.Abs(stageVal - accNormal)
				if normalMeanDist < val.NormalMeanDist {
					val.NormalMeanDist = normalMeanDist
					val.NormalMeanIdx = i
				}
				// Remember the day index where each series comes closest to the threshold.
				for r := range accs {
					if dists[r] < val.Dists[r] {
						val.Hists[r] = i
						val.Dists[r] = dists[r]
					}
				}
				state[stage] = val
			}
		}
	}
	ret := BinStageMatches(state, year, start, request.PlantDate)
	return ret
}
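
// The matching loop above in miniature: accumulate a daily series and record
// the day index whose running total comes closest to a stage threshold. This
// is an illustrative helper, not part of the service.
func closestAccumulatedDay(daily []float64, threshold float64) int {
	best, bestDist, acc := 0, math.MaxFloat64, 0.0
	for i, v := range daily {
		acc += v
		if d := math.Abs(threshold - acc); d < bestDist {
			bestDist, best = d, i
		}
	}
	return best
}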
// AvgDiff returns the mean gap between consecutive values after sorting.
// Since consecutive differences of sorted data telescope, this equals
// (max - min) / (len(data) - 1).
func AvgDiff(data []int) float64 {
	sort.Ints(data)
	sum := 0.0
	c := 0
	for i := 0; i < len(data)-1; i++ {
		diff := math.Abs(float64(data[i] - data[i+1]))
		sum += diff
		c += 1
	}
	return sum / float64(c)
}
// Min returns the smallest value; note that it sorts (and so mutates) data.
func Min(data []int) int {
	sort.Ints(data)
	return data[0]
}
// BinStageMatches histograms each stage's matched day indices into binCount
// bins and converts the counts into smoothed probabilities with dates.
func BinStageMatches(stageState map[string]models.StageStateInner, year int, start int, plantDate time.Time) map[string]models.Bins {
	response := map[string]models.Bins{}
	alpha := 1.0
	add := 0
	if year%4 == 0 && year%100 != 0 || year%400 == 0 {
		add -= 1
	}
	binCount := 3

	for state, stateVal := range stageState {
		// min := stateVal.Normal95thIdx
		min := Min(stateVal.Hists)
		stepSize := int(math.Ceil(AvgDiff(stateVal.Hists)) + 1) // add 1 to increase range (cheating a little) and for uncertainty
		arr := []float64{}
		idxs := []int{}
		base := min
		total := 0
		for i := 0; i < binCount; i++ {
			count := 0.0
			for _, h := range stateVal.Hists {
				if base <= h && h < base+stepSize {
					count += 1
					total += 1
				}
			}
			idxs = append(idxs, base)
			arr = append(arr, count)
			base += stepSize
		}
		inner := models.Bins{}
		inner.Bins = []models.Bin{}
		for i := 0; i < binCount; i++ {
			idx := idxs[i] + add
			date := plantDate.AddDate(0, 0, idx-start)
			val := arr[i]
			smoothedVal := (val + alpha) / (float64(total) + float64(binCount)*alpha) // modified version of laplace smoothing to remove 0%
			inner.Bins = append(inner.Bins, models.Bin{
				Date:  date,
				Value: smoothedVal,
			})
		}
		inner.ComparisonMean = plantDate.AddDate(0, 0, stateVal.NormalMeanIdx-start)
		inner.Count = total
		response[state] = inner
	}
	return response
}

// ForecastFirstLastFreeze finds the last spring freeze (January-July) and the
// first fall freeze (August-December) in the observed-plus-forecast minimum
// temperature series, padded with climate normals when the forecast is short.
func ForecastFirstLastFreeze(ctx common.DawnCtx, request models.FreezingForecastRequest) models.FreezingForecastResponse {
	lastFreezeIdx := 0
	firstFreezeIdx := 0

	baseData := persistence.CurrentGddFindFirstByYearAndLocation(ctx, models.BuildLocation(request.Latitude, request.Longitude))
	cfsData := persistence.CfsFindAllByLocation(models.BuildLocation(request.Latitude, request.Longitude))
	normalsData := persistence.NormalsFindFirstByYearAndLocation(models.BuildLocation(request.Latitude, request.Longitude))

	cfsData.MinTemps = append(baseData.MinTemps, cfsData.MinTemps...)

	if len(cfsData.MinTemps) < len(normalsData.MinTemps) {
		smallerNormalRegion := normalsData.MinTemps[len(cfsData.MinTemps):]
		cfsData.MinTemps = append(cfsData.MinTemps, smallerNormalRegion...)
	}

	startDate := time.Date(time.Now().Year(), time.January, 1, 0, 0, 0, 0, time.UTC)

	firstHalfFirstDate := time.Date(time.Now().Year(), time.January, 1, 0, 0, 0, 0, time.UTC)
	firstHalfLastDate := time.Date(time.Now().Year(), time.July, 31, 0, 0, 0, 0, time.UTC)

	lastHalfFirstDate := time.Date(time.Now().Year(), time.August, 1, 0, 0, 0, 0, time.UTC)
	lastHalfLastDate := time.Date(time.Now().Year(), time.December, 31, 0, 0, 0, 0, time.UTC)

	for i := 0; i < len(cfsData.MinTemps); i++ {
		currentDate := startDate.AddDate(0, 0, i)
		if cfsData.MinTemps[i] <= request.FreezingTemp && currentDate.After(firstHalfFirstDate) && currentDate.Before(firstHalfLastDate) {
			lastFreezeIdx = i
		}
		if cfsData.MinTemps[i] <= request.FreezingTemp && currentDate.After(lastHalfFirstDate) && currentDate.Before(lastHalfLastDate) && firstFreezeIdx == 0 {
			firstFreezeIdx = i
			break
		}
	}

	lastFreezeDate := startDate.AddDate(0, 0, lastFreezeIdx)
	firstFreezeDate := startDate.AddDate(0, 0, firstFreezeIdx)

	return models.FreezingForecastResponse{
		LastFreeze:  []time.Time{lastFreezeDate},
		FirstFreeze: []time.Time{firstFreezeDate},
	}
}
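
// The smoothing step from BinStageMatches in isolation: additive (Laplace)
// smoothing with alpha = 1 over binCount bins. Raw counts {4, 0, 0} (total 4)
// become {5/7, 1/7, 1/7}, so no bin is ever reported as exactly 0%.
// Illustrative helper, not part of the service.
func laplaceSmoothSketch(counts []float64, alpha float64) []float64 {
	total := 0.0
	for _, c := range counts {
		total += c
	}
	out := make([]float64, len(counts))
	for i, c := range counts {
		out[i] = (c + alpha) / (total + float64(len(counts))*alpha)
	}
	return out
}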
kind: product
name: dawn-gdd
---
kind: release
product-name: dawn-gdd
product-version: 0.1.0
release-channel: DEV
helm-chart: dawn/dawn-gdd