diff --git a/config/local b/config/local
index 9c2e9fb6f36d5e22d84afafb8219221744d93810..6853bfe90e724be74731d27ec6e09c5a936c94f1 100644
--- a/config/local
+++ b/config/local
@@ -13,7 +13,7 @@ server:
   context-path: "/api/weather"
 
 db:
-  uri: "mongodb://127.0.0.1:27017/"
+  uri: "mongodb://10.104.30.211:27017/"
   database: "weather-service"
 
 logging:
diff --git a/go.mod b/go.mod
index 8ca8e25867b25c6561dca8d611cc605d91d0f81c..abfaa055bc2becc85a7e41262e0fb7de4d367f7b 100644
--- a/go.mod
+++ b/go.mod
@@ -1,30 +1,84 @@
 module gitlab.cs.umd.edu/dawn/go-backend/dawn-gdd
 
-go 1.16
+go 1.18
 
 require (
 	github.com/ansrivas/fiberprometheus/v2 v2.1.2
 	github.com/arsmn/fiber-swagger/v2 v2.15.0
-	github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
 	github.com/bradfitz/slice v0.0.0-20180809154707-2b758aa73013
-	github.com/go-openapi/jsonreference v0.19.6 // indirect
-	github.com/go-openapi/swag v0.19.15 // indirect
 	github.com/go-ozzo/ozzo-validation v3.6.0+incompatible
-	github.com/gofiber/adaptor/v2 v2.1.14 // indirect
 	github.com/gofiber/fiber/v2 v2.31.0
 	github.com/google/uuid v1.3.0
-	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/montanaflynn/stats v0.6.6
-	github.com/prometheus/common v0.30.0 // indirect
-	github.com/prometheus/procfs v0.7.2 // indirect
 	github.com/spf13/cobra v1.5.0
 	github.com/spf13/viper v1.9.0
 	github.com/stretchr/testify v1.7.0
 	github.com/swaggo/swag v1.7.1
 	github.com/tgs266/dawn-go-common v0.0.0-20221006213126-74b2f745df9d
 	go.mongodb.org/mongo-driver v1.7.3
+	gonum.org/v1/gonum v0.9.3
+)
+
+require (
+	github.com/KyleBanks/depth v1.2.1 // indirect
+	github.com/PuerkitoBio/purell v1.1.1 // indirect
+	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
+	github.com/andybalholm/brotli v1.0.4 // indirect
+	github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cespare/xxhash/v2 v2.1.1 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/fsnotify/fsnotify v1.5.1 // indirect
+	github.com/go-openapi/jsonpointer v0.19.5 // indirect
+	github.com/go-openapi/jsonreference v0.19.6 // indirect
+	github.com/go-openapi/spec v0.20.3 // indirect
+	github.com/go-openapi/swag v0.19.15 // indirect
+	github.com/go-stack/stack v1.8.0 // indirect
+	github.com/gobwas/glob v0.2.3 // indirect
+	github.com/gofiber/adaptor/v2 v2.1.14 // indirect
+	github.com/gofiber/utils v0.1.2 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/snappy v0.0.3 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/klauspost/compress v1.15.0 // indirect
+	github.com/magiconair/properties v1.8.5 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+	github.com/mileusna/useragent v1.2.1 // indirect
+	github.com/mitchellh/mapstructure v1.4.2 // indirect
+	github.com/pelletier/go-toml v1.9.4 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_golang v1.11.0 // indirect
+	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/common v0.30.0 // indirect
+	github.com/prometheus/procfs v0.7.2 // indirect
+	github.com/spf13/afero v1.6.0 // indirect
+	github.com/spf13/cast v1.4.1 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/streadway/amqp v1.0.0 // indirect
+	github.com/subosito/gotenv v1.2.0 // indirect
+	github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14 // indirect
+	github.com/undefinedlabs/go-mpatch v1.0.6 // indirect
+	github.com/valyala/bytebufferpool v1.0.0 // indirect
+	github.com/valyala/fasthttp v1.34.0 // indirect
+	github.com/valyala/tcplisten v1.0.0 // indirect
+	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
+	github.com/xdg-go/scram v1.0.2 // indirect
+	github.com/xdg-go/stringprep v1.0.2 // indirect
+	github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
 	go4.org v0.0.0-20201209231011-d4a079459e60 // indirect
+	golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
 	golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 // indirect
+	golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
+	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
+	golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9 // indirect
+	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/tools v0.1.8 // indirect
-	gonum.org/v1/gonum v0.9.3
+	google.golang.org/protobuf v1.27.1 // indirect
+	gopkg.in/ini.v1 v1.63.2 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 )
diff --git a/go.sum b/go.sum
index 1d006198fb311c60b1da832e57511d17a4bbf1bd..db534359f35ba7b5ee60dc06b168bd3b7cab0f61 100644
--- a/go.sum
+++ b/go.sum
@@ -95,7 +95,6 @@ github.com/bradfitz/slice v0.0.0-20180809154707-2b758aa73013/go.mod h1:pccXHIvs3
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -147,7 +146,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
 github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
 github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
 github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
 github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
@@ -435,8 +433,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
-github.com/montanaflynn/stats v0.6.6 h1:Duep6KMIDpY4Yo11iFsvyqJDyfzLF9+sndUKT+v64GQ=
-github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
@@ -612,7 +608,6 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
 go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
@@ -656,7 +651,6 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
 golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE=
 golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -706,7 +700,6 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
 golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 h1:LQmS1nU0twXLA96Kt7U9qtHJEbBk3z6Q0V4UXjZkpr4=
 golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -758,7 +751,6 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1
 golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
@@ -865,7 +857,6 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9 h1:nhht2DYV/Sn3qOayu8lM+cU1ii9sTLUeBQwQQfUHtrs=
 golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/models/forecast.go b/models/forecast.go
index 65a454b3a8852cc9a85f64276de10fc8e3d0d863..9cd2238d6623cbb2cea30ff3364c892ae0648527 100644
--- a/models/forecast.go
+++ b/models/forecast.go
@@ -149,7 +149,7 @@ func (s StageStateInner) ExtractDates(plantDate time.Time, start int) []time.Tim
 	return ret
 }
 
-func BuildStageMatches(mode string, value int, start int, fyData StageData, req StageRequest) map[string]float64 {
+func BuildStageMatches(mode string, value int) map[string]float64 {
 	var harvestVal float64
 	if mode == "rm" {
 		harvestVal = float64(((float64(value) - 95.0) * 22.0) + 2375.0)
@@ -157,31 +157,6 @@ func BuildStageMatches(mode string, value int, start int, fyData StageData, req
 		harvestVal = float64(value)
 	}
 
-	// if !req.AnchorDate.IsZero() {
-	// 	mod := 1.0
-	// 	switch req.AnchorStage {
-	// 	case "emergence":
-	// 		mod = 1.0 / 0.07
-	// 	case "3LeafCollar":
-	// 		mod = 1.0 / 0.13
-	// 	case "6LeafCollar":
-	// 		mod = 1.0 / 0.2
-	// 	case "silk":
-	// 		mod = 1.0 / 0.545
-	// 	case "milk":
-	// 		mod = 1.0 / 0.725
-	// 	}
-	// 	accMean := 0.0
-	// 	for i := start; i < len(fyData.MaxGdds); i++ {
-	// 		accMean += fyData.MeanGdds[i]
-	// 		meanDate := req.PlantDate.AddDate(0, 0, i-start)
-	// 		if req.AnchorDate == meanDate {
-	// 			harvestVal = accMean * mod
-	// 			break
-	// 		}
-	// 	}
-	// }
-
 	return map[string]float64{
 		"emergence":   harvestVal * 0.07,
 		"3LeafCollar": harvestVal * 0.13,
diff --git a/services/forecast_service.go b/services/forecast_service.go
index 800af411a96b66888576869c179ad191c28de2cd..99a819693b330acf02fd10fa9000dd856e755c9b 100644
--- a/services/forecast_service.go
+++ b/services/forecast_service.go
@@ -5,142 +5,330 @@ import (
 	"sort"
 	"time"
 
-	"github.com/montanaflynn/stats"
 	"github.com/tgs266/dawn-go-common/common"
 	"gitlab.cs.umd.edu/dawn/go-backend/dawn-gdd/models"
 	"gitlab.cs.umd.edu/dawn/go-backend/dawn-gdd/models/enums"
 	"gitlab.cs.umd.edu/dawn/go-backend/dawn-gdd/persistence"
 	"gitlab.cs.umd.edu/dawn/go-backend/dawn-gdd/persistence/entities"
 	"gitlab.cs.umd.edu/dawn/go-backend/dawn-gdd/utils"
+	"gitlab.cs.umd.edu/dawn/go-backend/dawn-gdd/utils/dispatch"
 )
 
-func GetStageYearData(ctx common.DawnCtx, request models.GddRequest, comparison int) models.StageData {
-	product := enums.GetProductFromString(request.Product)
-	gddData := persistence.CurrentGddFindFirstByYearAndLocation(ctx, request.BuildLocation())
-	gdds := utils.CalculateGddValues(gddData.MinTemps, gddData.MaxTemps, product, false)
-	request.Year = gddData.AnalogYear
-
-	var gs []entities.Gdd
-	norms := persistence.GetLastNormalsYearly(request.BuildLocation())
-
-	if comparison == -1 {
-		gs = norms
-	} else {
-		gs = []entities.Gdd{persistence.GddFindFirstByYearAndLocation(comparison, request.BuildLocation())}
+// adjust each CFS series by accumulating its GDDs on top of the provided observed accumulated value
+func calculateAccumulatedCfsBasedOnAccumulatedObserved(product enums.Product, accumulated float64, cfs []entities.CfsGdd) [][]float64 {
+	out := [][]float64{}
+	for _, c := range cfs {
+		tempAccum := accumulated
+		temp := []float64{}
+		for i := range c.MinTemps {
+			tempAccum = tempAccum + utils.CalculateSingleGdd(c.MinTemps[i], c.MaxTemps[i], product)
+			temp = append(temp, tempAccum)
+		}
+		out = append(out, temp)
 	}
-	var normalMeanNonAcc []float64
-	comparisonRows := [][]float64{}
+	return out
+}
 
-	for i := 0; i < len(gs[0].MinTemps); i++ {
-		rowComp := []float64{}
-		rowNormal := []float64{}
-		for j := 0; j < len(gs); j++ {
-			rowComp = append(rowComp, utils.CalculateSingleGdd(gs[j].MinTemps[i], gs[j].MaxTemps[i], product))
-		}
-		for j := 0; j < len(norms); j++ {
-			rowNormal = append(rowNormal, utils.CalculateSingleGdd(norms[j].MinTemps[i], norms[j].MaxTemps[i], product))
+// Since cfs is multiple predictions of a single time frame, we need to boil down
+// the data into an array of dates to then bin later on
+func getDatesForCfsMatches(cfs [][]float64, lastDateInt int, currentMatch int, keys []string, matches map[string]float64) map[string][]int {
+	out := map[string][]int{}
+	for _, c := range cfs {
+		lastDist := matches[keys[currentMatch]]
+		tempCMatch := currentMatch
+		for i, v := range c {
+			dist := math.Abs(matches[keys[currentMatch]] - v)
+			// check if the last value is closer than the current. if it is, then the last value is the one to return
+			if dist > lastDist {
+				if v, exists := out[keys[tempCMatch]]; exists {
+					out[keys[tempCMatch]] = append(v, lastDateInt+i-1)
+				} else {
+					out[keys[tempCMatch]] = []int{lastDateInt + i - 1}
+				}
+				tempCMatch += 1
+				if tempCMatch >= len(keys) {
+					break
+				}
+			}
+			lastDist = dist
 		}
-		comparisonRows = append(comparisonRows, rowComp)
-		normMeanNoAccValue, _ := stats.Mean(rowNormal)
-		normalMeanNonAcc = append(normalMeanNonAcc, normMeanNoAccValue)
-	}
-
-	allCfs := persistence.CfsFindByLocationMultiple(request.BuildLocation(), 4)
-	// cfsMeans := persistence.CfsFindAllByLocation(request.BuildLocation())
-
-	gddArr := [][]float64{}
-	for i, c := range allCfs {
-		gddArr = append(gddArr, gdds)
-		cfsGddData := utils.CalculateGddValues(c.MinTemps, c.MaxTemps, product, false) // not accumulated
-		// anomaly adjustment function
-		// cfsGddData := utils.CalculateGddValuesCfsNormed(c.MinTemps, c.MaxTemps, product, cfsMeans.MinTemps, cfsMeans.MaxTemps, normalMeanNonAcc) // not accumulated
-		gddArr[i] = append(gddArr[i], cfsGddData...)
-		if len(gddArr[i]) > len(normalMeanNonAcc) {
-			gddArr[i] = gddArr[i][:len(normalMeanNonAcc)]
+	}
+	return out
+}
+
+// bin array of dates into a frequency map and return that map and keys
+func bin(data []int) (map[int]int, []int) {
+	out := map[int]int{}
+	keys := []int{}
+	for _, d := range data {
+		if count, exists := out[d]; exists {
+			out[d] = count + 1
 		} else {
-			gddArr[i] = append(gddArr[i], normalMeanNonAcc[len(gddArr[i]):]...)
+			out[d] = 1
+			keys = append(keys, d)
 		}
 	}
-	// none of this data is accumulated
-	returnData := models.StageData{
-		AllGdds:       gddArr,
-		ComparisonAll: comparisonRows,
-	}
-	return returnData
+	return out, keys
 }
 
-func CalculateStages(ctx common.DawnCtx, request models.StageRequest) map[string]models.Bins {
-	gddReq := models.GddRequest{
-		Year:       request.PlantDate.Year(),
-		Latitude:   request.Latitude,
-		Longitude:  request.Longitude,
-		Accumulate: false,
-		Product:    "CORN",
+// get the date key of the highest-frequency bin (note: returns the key, not the count)
+func getLargestBinCount(bins map[int]int) int {
+	largest := 0
+	largestKey := 0
+	for key, count := range bins {
+		if count > largest {
+			largest = count
+			largestKey = key
+		}
 	}
-	fyData := GetStageYearData(ctx, gddReq, request.Comparison)
+	return largestKey
+}
 
-	start := request.PlantDate.YearDay()
-	year := request.PlantDate.Year()
-	if year%4 == 0 && year%100 != 0 || year%400 == 0 {
-		start -= 1
+// get keys of the stage matches sorted by ascending match value
+func getMatchKeys(matches map[string]float64) []string {
+	keys := make([]string, 0, len(matches))
+
+	for key := range matches {
+		keys = append(keys, key)
 	}
 
-	state := map[string]models.StageStateInner{}
-	stageMatches := models.BuildStageMatches(request.Mode, request.Value, start, fyData, request)
+	sort.SliceStable(keys, func(i, j int) bool {
+		return matches[keys[i]] < matches[keys[j]]
+	})
+	return keys
+}
 
-	accs := make([]float64, len(fyData.AllGdds))
-	accs2 := make([]float64, len(fyData.ComparisonAll[0]))
-	accNormal := 0.0
-	for i := start; i < len(fyData.AllGdds[0]); i++ {
+/* this does the actual forecasting
 
-		for r, v := range fyData.AllGdds {
-			accs[r] += v[i]
-		}
-		for j := 0; j < len(fyData.ComparisonAll[0]); j++ {
-			accs2[j] += fyData.ComparisonAll[i][j]
+hardcode the alpha (for Laplace smoothing) and binCount
+*/
+func forecast(ctx common.DawnCtx, gddReq models.GddRequest, plantdate time.Time, matches map[string]float64, observed models.GddResponse, cfs []entities.CfsGdd) map[string]*models.Bins {
+	alpha := 1.0
+	binCount := 5
+	product := enums.GetProductFromString(gddReq.Product)
+	start := plantdate.YearDay()
+	if plantdate.Year()%4 == 0 && plantdate.Year()%100 != 0 || plantdate.Year()%400 == 0 {
+		start -= 1
+	}
+
+	out := map[string]*models.Bins{}
+	// need to get the match order
+	keys := getMatchKeys(matches)
+
+	currentMatch := 0 // first match
+
+	// start at plantdate, begin accumulating OBSERVED until we run out
+	// since this is observed, we should only have 1 bin with a value of 100%
+	// at the end, we should have all observed stages along with all accumulated gdds of the planting year
+	observedValues := observed.GddValues[start:]
+	accumulatedGdds := 0.0
+	lastDist := matches[keys[currentMatch]]
+	date := 0
+	for i := 0; i < len(observedValues); i++ {
+		date = i
+		dist := math.Abs(matches[keys[currentMatch]] - accumulatedGdds)
+		// check if the last value is closer than the current. if it is, then the last value is the one to return
+		if dist > lastDist {
+			out[keys[currentMatch]] = &models.Bins{
+				Bins: []models.Bin{
+					{
+						Value: 1,
+						Date:  plantdate.AddDate(0, 0, i-1),
+					},
+				},
+			}
+			currentMatch += 1
+			if currentMatch >= len(keys) {
+				break
+			}
+			dist = matches[keys[currentMatch]]
 		}
+		accumulatedGdds += observedValues[i]
+		lastDist = dist
+	}
 
-		normal, _ := stats.Mean(accs2)
+	// adjust cfs values to start at the accumulated value
+	adjustedCfs := calculateAccumulatedCfsBasedOnAccumulatedObserved(product, accumulatedGdds, cfs)
+	cfsHist := getDatesForCfsMatches(adjustedCfs, date, currentMatch, keys, matches)
 
-		accNormal = normal
+	// this loop will actually build the 5 bins
+	for k, v := range cfsHist {
 
-		for stage, stageVal := range stageMatches {
-			dists := make([]float64, len(fyData.AllGdds))
-			for r, v := range accs {
-				dists[r] = math.Abs(stageVal - v)
-			}
-			if val, ok := state[stage]; !ok {
+		binnedDates, keys := bin(v)
+		stepSize := int(math.Ceil(AvgDiff(keys)) + 1) // add 1 to increase range and for uncertainty
+		largestKey := getLargestBinCount(binnedDates)
 
-				state[stage] = models.StageStateInner{
-					Dists:          dists,
-					Hists:          make([]int, len(fyData.AllGdds)),
-					NormalMeanDist: 1000000,
-					NormalMeanIdx:  0,
-				}
-			} else {
-				normalMeanDist := math.Abs(stageVal - accNormal)
+		sort.Ints(keys)
 
-				if normalMeanDist < val.NormalMeanDist {
-					val.NormalMeanDist = normalMeanDist
-					val.NormalMeanIdx = i
-				}
+		min := largestKey - stepSize*2
+		temp := map[int]int{}
+		// need to rebin now
+		sum := 0
 
-				for r := range accs {
-					if dists[r] < val.Dists[r] {
-						val.Hists[r] = i
-						val.Dists[r] = dists[r]
+		// we force the highest frequency to be the center date, then find dates on each side that fit
+		for i := 0; i < binCount; i++ {
+			for date, v := range binnedDates {
+				if min <= date && date < min+stepSize {
+					sum += v
+					if count, exists := temp[min]; exists {
+						temp[min] = count + v
+					} else {
+						temp[min] = v
 					}
 				}
-				state[stage] = val
 			}
+			min += stepSize
 		}
 
+		// convert to output model
+		bins := []models.Bin{}
+		for d, v := range temp {
+			bins = append(bins, models.Bin{
+				Value: (float64(v) + alpha) / (float64(sum) + float64(binCount)*alpha),
+				Date:  plantdate.AddDate(0, 0, d),
+			})
+		}
+		out[k] = &models.Bins{
+			Bins: bins,
+		}
+
+	}
+
+	return out
+}
+
+// will use normals to determine dates for each stage
+//
+// this function allows normals to "wrap around" to get continuous values
+func comparisonNormals(request models.GddRequest, plantdate time.Time, matches map[string]float64) map[string]time.Time {
+	normals := GetNormalValues(request)
+	observedValues := normals.GddValues
+	accumulatedGdds := 0.0
+	currentMatch := 0
+	keys := getMatchKeys(matches)
+	lastDist := matches[keys[currentMatch]]
+
+	start := plantdate.YearDay()
+	year := plantdate.Year()
+	if year%4 == 0 && year%100 != 0 || year%400 == 0 {
+		start -= 1
 	}
-	ret := BinStageMatches(state, year, start, request.PlantDate)
-	return ret
 
+	i := start
+	date := 0
+	out := map[string]time.Time{}
+
+	for {
+		dist := math.Abs(matches[keys[currentMatch]] - accumulatedGdds)
+		// check if the last value is closer than the current. if it is, then the last value is the one to return
+		if dist > lastDist {
+			out[keys[currentMatch]] = plantdate.AddDate(0, 0, date-1)
+			currentMatch += 1
+			if currentMatch >= len(keys) {
+				break
+			}
+			dist = matches[keys[currentMatch]]
+		}
+		accumulatedGdds += observedValues[i]
+		lastDist = dist
+		i += 1
+		date += 1
+		if i >= len(observedValues) {
+			i = 0
+		}
+	}
+	return out
 }
 
+// dispatches a go routine to handle the comparison values
+func comparisonGoRoutine(request models.GddRequest, plantdate time.Time, matches map[string]float64, comparisonMode int, thread *dispatch.Thread[map[string]time.Time]) {
+	defer func() {
+		thread.Recover(recover())
+	}()
+	if comparisonMode == -1 {
+		thread.Result(comparisonNormals(request, plantdate, matches))
+		return
+	}
+	thread.Result(map[string]time.Time{})
+	return
+}
+
+func asyncCollectGddsAndCfs(ctx common.DawnCtx, gddReq models.GddRequest) (models.GddResponse, []entities.CfsGdd) {
+	gddThread := dispatch.New[models.GddResponse]()
+	go func() {
+		defer func() {
+			r := recover()
+			gddThread.Recover(r)
+		}()
+		gdds := GetGddValues(ctx, gddReq)
+		gddThread.Result(gdds)
+	}()
+
+	cfsThread := dispatch.New[[]entities.CfsGdd]()
+	go func() {
+		defer func() { cfsThread.Recover(recover()) }()
+		cfs := persistence.CfsFindByLocationMultiple(gddReq.BuildLocation(), 4)
+		cfsThread.Result(cfs)
+	}()
+
+	gdds, err := gddThread.Get()
+	if err != nil {
+		panic(err)
+	}
+	cfs, err := cfsThread.Get()
+	if err != nil {
+		panic(err)
+	}
+
+	return gdds, cfs
+}
+
+/* HOW DOES THIS WORK:
+
+1. Collect observed and cfs data
+
+2. Determine matches for corn based on rm/gdds provided
+
+2.b. Dispatch comparison routine
+
+3. Match observed dates to matches for corn with the closest accum gdds
+
+4. Bin cfs projections (since there are a lot of them) into groups. Use same matching algo
+
+5. Join, and merge comparison results
+
+*/
+func CalculateStages(ctx common.DawnCtx, request models.StageRequest) map[string]*models.Bins {
+	gddReq := models.GddRequest{
+		Year:       request.PlantDate.Year(),
+		Latitude:   request.Latitude,
+		Longitude:  request.Longitude,
+		Accumulate: false,
+		Product:    "CORN",
+	}
+
+	gdds, cfs := asyncCollectGddsAndCfs(ctx, gddReq)
+
+	stageMatches := models.BuildStageMatches(request.Mode, request.Value)
+
+	comparisonThread := dispatch.New[map[string]time.Time]()
+
+	go comparisonGoRoutine(gddReq, request.PlantDate, stageMatches, request.Comparison, comparisonThread)
+	out := forecast(ctx, gddReq, request.PlantDate, stageMatches, gdds, cfs)
+
+	// block, wait for comparison results
+	compResults, compError := comparisonThread.Get()
+	if compError != nil {
+		panic(compError)
+	}
+	for k, v := range compResults {
+		out[k].ComparisonMean = v
+	}
+	return out
+
+}
+
+// find the average difference of a sorted array
 func AvgDiff(data []int) float64 {
 	sort.Ints(data)
 	sum := 0.0
@@ -152,59 +340,6 @@ func AvgDiff(data []int) float64 {
 	}
 	return sum / float64(c)
 }
-func Min(data []int) int {
-	sort.Ints(data)
-	return data[0]
-}
-
-func BinStageMatches(stageState map[string]models.StageStateInner, year int, start int, plantDate time.Time) map[string]models.Bins {
-	response := map[string]models.Bins{}
-	alpha := 1.0
-	add := 0
-	if year%4 == 0 && year%100 != 0 || year%400 == 0 {
-		add -= 1
-	}
-
-	binCount := 3
-
-	for state, stateVal := range stageState {
-		// min := stateVal.Normal95thIdx
-		min := Min(stateVal.Hists)
-		stepSize := int(math.Ceil(AvgDiff(stateVal.Hists)) + 1) // add 1 to increase range (cheating a little) and for uncertainty
-		arr := []float64{}
-		idxs := []int{}
-		base := min
-		total := 0
-		for i := 0; i < binCount; i++ {
-			count := 0.0
-			for _, h := range stateVal.Hists {
-				if base <= h && h < base+stepSize {
-					count += 1
-					total += 1
-				}
-			}
-			idxs = append(idxs, base)
-			arr = append(arr, count)
-			base += stepSize
-		}
-		inner := models.Bins{}
-		inner.Bins = []models.Bin{}
-		for i := 0; i < binCount; i++ {
-			idx := idxs[i] + add
-			date := plantDate.AddDate(0, 0, idx-start)
-			val := arr[i]
-			smoothedVal := (val + alpha) / (float64(total) + float64(binCount)*alpha) // modified version of laplace smoothing to remove 0%
-			inner.Bins = append(inner.Bins, models.Bin{
-				Date:  date,
-				Value: smoothedVal,
-			})
-		}
-		inner.ComparisonMean = plantDate.AddDate(0, 0, stateVal.NormalMeanIdx-start)
-		inner.Count = total
-		response[state] = inner
-	}
-	return response
-}
 
 func ForecastFirstLastFreeze(ctx common.DawnCtx, request models.FreezingForecastRequest) models.FreezingForecastResponse {
 	lastFreezeIdx := 0
diff --git a/utils/dispatch/dispatch.go b/utils/dispatch/dispatch.go
new file mode 100644
index 0000000000000000000000000000000000000000..aa4d2132787e437eec83765d9e56130d4a38ebb1
--- /dev/null
+++ b/utils/dispatch/dispatch.go
@@ -0,0 +1,72 @@
+package dispatch
+
+import (
+	"fmt"
+)
+
+/*
+Helper type for dispatching operations in a clean manner
+
+Please add a defer to any function that uses these
+
+Example:
+
+	gddThread := dispatch.New[models.GddResponse]()
+	go func() {
+		defer func() { gddThread.Recover(recover()) }()
+		gdds := GetGddValues(ctx, gddReq)
+		gddThread.Result(gdds)
+	}()
+
+	// Get blocks until the goroutine reports a result or an error:
+
+	gdds, err := gddThread.Get()
+	if err != nil {
+		panic(err)
+	}
+*/
+type Thread[T any] struct {
+	result chan ThreadData[T]
+}
+
+type ThreadData[T any] struct {
+	err    error
+	result T
+}
+
+func New[T any]() *Thread[T] {
+	return &Thread[T]{
+		result: make(chan ThreadData[T]),
+	}
+}
+
+func (t *Thread[T]) Error(err error) {
+	t.result <- ThreadData[T]{
+		err: err,
+	}
+}
+
+func (t *Thread[T]) Result(r T) {
+	result := ThreadData[T]{
+		result: r,
+		err:    nil,
+	}
+
+	t.result <- result
+}
+
+func (t *Thread[T]) Recover(r any) {
+	if e, ok := r.(error); ok {
+		t.Error(e)
+	} else {
+		t.Error(fmt.Errorf("unknown error"))
+	}
+}
+
+// will block
+func (t *Thread[T]) Get() (T, error) {
+	select {
+	case result := <-t.result:
+		return result.result, result.err
+	}
+}