inferencecontext.go

package model

import (
	"github.com/adalkiran/llama-nuts-and-bolts/src/common"
	"github.com/adalkiran/llama-nuts-and-bolts/src/ml"
)

type InferenceContext struct {
	SequenceLength int // context size used during inference

	CacheK []*ml.Tensor // per-layer key cache tensors
	CacheV []*ml.Tensor // per-layer value cache tensors

	logFn func(format string, v ...any)
}

func NewInferenceContext(model *Model, inferenceArgs common.InferenceArgs, logFn func(format string, v ...any)) *InferenceContext {
	// See: https://github.com/ggerganov/llama.cpp/blob/a7aee47b98e45539d491071b25778b833b77e387/llama.cpp#L9304C14-L9304C14
	context := &InferenceContext{
		logFn: logFn,
	}
	if inferenceArgs.SequenceLength > 0 {
		context.SequenceLength = inferenceArgs.SequenceLength
	} else {
		context.SequenceLength = model.ModelArgs.MaxSequenceLength
	}
	modelArgs := model.ModelArgs
	// Allocate one zero-initialized K and V cache tensor per transformer layer.
	// The resolved context.SequenceLength is used for the cache shapes, so the
	// fallback to the model's maximum above applies here as well.
	context.CacheK = make([]*ml.Tensor, modelArgs.N_Layers)
	context.CacheV = make([]*ml.Tensor, modelArgs.N_Layers)
	for layerIdx := 0; layerIdx < modelArgs.N_Layers; layerIdx++ {
		context.CacheK[layerIdx], _ = ml.Zeros([]int{
			context.SequenceLength, // resolved sequence length (argument value, default 4096)
			modelArgs.N_KVHeads,    // e.g. 32 for Llama 2 7B
			modelArgs.HeadDim,      // e.g. 128 for Llama 2 7B
		}, ml.DT_BF16)
		context.CacheV[layerIdx], _ = ml.Zeros([]int{
			context.SequenceLength, // resolved sequence length (argument value, default 4096)
			modelArgs.N_KVHeads,    // e.g. 32 for Llama 2 7B
			modelArgs.HeadDim,      // e.g. 128 for Llama 2 7B
		}, ml.DT_BF16)
	}
	common.GLogger.DebugPrintf("Inference Context created with SequenceLength: %d", context.SequenceLength)
	return context
}
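
// Rough KV-cache footprint for the allocation above, as a back-of-the-envelope
// sketch (the concrete numbers assume Llama 2 7B shapes matching the example
// values in the comments: 32 layers, 32 KV heads, head dim 128, sequence
// length 4096; BF16 takes 2 bytes per element):
//
//	per tensor: 4096 * 32 * 128 * 2 bytes        = 32 MiB
//	total:      32 MiB * 2 (K and V) * 32 layers =  2 GiB
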
// Logf forwards the formatted message to the configured log function, if one was set.
func (ic *InferenceContext) Logf(format string, v ...any) {
	if ic.logFn != nil {
		ic.logFn(format, v...)
	}
}
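
For reference, a minimal usage sketch. It assumes an already-loaded *Model from the repository's model-loading code, and that common.InferenceArgs can be built with a plain struct literal; both are assumptions based only on the fields referenced in this file, not verified against the rest of the repository.

// Hypothetical usage sketch, not part of the original file.
func newContextForChat(loadedModel *Model) *InferenceContext {
	// A value of 0 would fall back to model.ModelArgs.MaxSequenceLength.
	args := common.InferenceArgs{SequenceLength: 4096}
	// log.Printf matches the logFn signature; requires importing "log".
	ctx := NewInferenceContext(loadedModel, args, log.Printf)
	ctx.Logf("KV cache allocated for %d layers", len(ctx.CacheK))
	return ctx
}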