diff --git a/docs/code/getting-started/task-engine/server.cue b/docs/code/getting-started/task-engine/server.cue new file mode 100644 index 00000000..9609ff63 --- /dev/null +++ b/docs/code/getting-started/task-engine/server.cue @@ -0,0 +1,66 @@ +package examples + +import "strings" + +server: { + @flow(server) + + // task: get auth from external command + gcp: { + @task(os.Exec) + cmd: ["gcloud", "auth", "print-access-token"] + stdout: string + key: strings.TrimSpace(stdout) + } + + run: { + @task(api.Serve) + + port: "8080" + routes: { + + // simple hello route + "/hello": { + method: "GET" + resp: { + status: 200 + body: "hallo chat!" + } + } + + // echo request object back as json + "/jsonecho": { + method: ["GET", "POST"] + req: body: {} + resp: json: req + } + + // our gemini call wrapped in an API + "/chat": { + @flow() + + method: "POST" + + // input schema + req: { + body: { + msg: string + } + } + + // task: api call via reusable task + call: _gemini & { + apikey: gcp.key + msg: req.body.msg + } + + // the response to user + resp: { + status: 200 + body: call.final.text + } + } + + } + } +} \ No newline at end of file diff --git a/docs/code/getting-started/task-engine/vertex.cue b/docs/code/getting-started/task-engine/vertex.cue index 49c156da..f8627578 100644 --- a/docs/code/getting-started/task-engine/vertex.cue +++ b/docs/code/getting-started/task-engine/vertex.cue @@ -2,6 +2,13 @@ package examples import "strings" +// inputs supplied via tags +inputs: { + model: string @tag(model) + prompt: string @tag(prompt) + msg: string @tag(msg) +} + vertex_chat: { @flow() // define a flow @@ -19,7 +26,9 @@ vertex_chat: { call: _gemini & { apikey: gcp.key - msg: "What is the CUE language?" 
+ model: inputs.model + prompt: inputs.prompt + msg: inputs.msg resp: body: _ } @@ -37,11 +46,10 @@ vertex_chat: { _gemini: { @task(api.Call) + apikey: string model: string | *"gemini-1.0-pro-002:generateContent" - + prompt: string | *"You are an assistant who is very concise when responding." msg: string - apikey: string - prompt: string | *"You are a model which is direct and concise when responding." req: { host: "https://us-central1-aiplatform.googleapis.com" @@ -71,6 +79,7 @@ _gemini: { resp: { body: _ } + @print(resp.body) // task-local ETL final: { diff --git a/docs/content/getting-started/task-engine.md b/docs/content/getting-started/task-engine.md index 17599dd8..b5427faf 100644 --- a/docs/content/getting-started/task-engine.md +++ b/docs/content/getting-started/task-engine.md @@ -25,9 +25,19 @@ Hof's task engine is an extension of cue/flow with ### Example +
-{{}} +A workflow for calling an LLM from the command line. +{{}} + +
+ +Wrapping the workflow in an API server. + +`hof flow @server` (with both CUE files in the same directory, we can omit the filenames) + +{{}} ### Command