diff --git a/demos/openai/audio.md b/demos/openai/audio.md
new file mode 100644
index 000000000..66fde7fd4
--- /dev/null
+++ b/demos/openai/audio.md
@@ -0,0 +1,76 @@
+# Audio
+
+```javascript
+let mediaRecorder
+await navigator.mediaDevices
+  .getUserMedia(
+    // constraints - only audio needed for this app
+    {
+      audio: true,
+    },
+  )
+  .then((stream) => {
+    mediaRecorder = new MediaRecorder(stream);
+  })
+  .catch((err) => {
+    lively.err(`The following getUserMedia error occurred: ${err}`);
+  });
+
+
+
+let chunks = [];
+
+
+mediaRecorder.addEventListener("dataavailable", (e) => {
+  chunks.push(e.data);
+})
+
+
+mediaRecorder.requestData()
+
+chunks.length
+
+mediaRecorder.start();
+mediaRecorder.stop()
+
+
+
+
+const audio = document.createElement("audio");
+
+const blob = new Blob(chunks, { type: "audio/ogg; codecs=opus" });
+  chunks = [];
+  const audioURL = window.URL.createObjectURL(blob);
+  audio.src = audioURL;
+audio.play()
+
+
+audioURL
+
+import OpenAI from "demos/openai/openai.js"
+  let apiKey = await OpenAI.ensureSubscriptionKey()
+
+  const formData = new FormData();
+
+  // Append the Blob object to the FormData object
+  formData.append('file', blob, 'audio.ogg');
+
+  // Append any other parameters required by the API
+  // For example, specifying the model or language
+  formData.append('model', 'whisper-1');
+
+
+
+  const url = "https://api.openai.com/v1/audio/transcriptions";
+
+  const requestOptions = {
+    method: "POST",
+    headers: {
+      "Authorization": `Bearer ${apiKey}`
+    },
+    body: formData
+  };
+
+
+fetch(url, requestOptions).then(r => r.text())
+```
\ No newline at end of file
diff --git a/demos/openai/vision.md b/demos/openai/vision.md
new file mode 100644
index 000000000..ad17b5234
--- /dev/null
+++ b/demos/openai/vision.md
@@ -0,0 +1,56 @@
+# Vision
+
+![](vision.png)
+
+ 
+![](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAAsUlEQVR4nO3aMQrCQBBF0XcU7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB7qB) + + +```javascript + +import OpenAI from "demos/openai/openai.js" + +let blob = await fetch("http://localhost:9005/lively4-core/demos/openai/vision.png").then(r => r.blob()) + +let image = await lively.files.readBlobAsDataURL(blob) + +let prompt = { + "model": "gpt-4o", + "max_tokens": 500, + "temperature": 0.1, + "top_p": 1, + "n": 1, + "stream": false, + "stop": "VANILLA", + "messages": [ + { "role": "system", "content": "You are an AI chat bot!" }, + { "role": "user", "content": [ + { + "type": "text", + "text": "What’s in this image?" + }, + { + "type": "image_url", + "image_url": { + "url": image, + "detail": "low" // high + } + } + ]} + ] + } +var json = await OpenAI.openAIRequest(prompt).then(r => r.json()) +json + + +json.choices[0].message + +``` + +Response: +``` + { +content: "The image appears to be a simple drawing of a smiling face. The face is outlined in red, with two eyes and a smiling mouth. The drawing is basic and cartoon-like." +role: "assistant" +} +``` \ No newline at end of file diff --git a/demos/openai/vision.png b/demos/openai/vision.png new file mode 100644 index 000000000..116252e49 Binary files /dev/null and b/demos/openai/vision.png differ