Teachable Machine Library - Audio

Library for using audio models created with Teachable Machine.

Model checkpoints

Teachable Machine provides one link for your model:

https://teachablemachine.withgoogle.com/models/MODEL_ID/

which you can use to access:

  • The model topology: https://teachablemachine.withgoogle.com/models/MODEL_ID/model.json
  • The model metadata: https://teachablemachine.withgoogle.com/models/MODEL_ID/metadata.json
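
For example, the two file URLs are simply the model link with the file names appended; a minimal sketch of that composition (MODEL_ID stands in for your own model's ID, as above):

// Base link from the Teachable Machine export panel (MODEL_ID is a placeholder).
const URL = 'https://teachablemachine.withgoogle.com/models/MODEL_ID/';

const checkpointURL = URL + 'model.json';  // model topology
const metadataURL = URL + 'metadata.json'; // model metadata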

Usage

There are two ways to easily use the model provided by Teachable Machine in your JavaScript project: by loading this library via script tags, or by installing it from NPM and using a build tool like Parcel, webpack, or Rollup.

via Script Tag

<script src="https://cdn.jsdelivr.net/npm/@tensorflow/[email protected]/dist/tf.min.js">
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/[email protected]/dist/speech-commands.min.js">

via NPM

npm i @tensorflow/tfjs
npm i @tensorflow-models/speech-commands

import * as tf from '@tensorflow/tfjs';
import * as speechCommands from '@tensorflow-models/speech-commands';
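
With the packages installed, the same loading flow shown in the sample snippet below can be written with module imports. A minimal sketch, assuming you replace the placeholder model link with the one from your export panel:

import * as tf from '@tensorflow/tfjs'; // sets up the TensorFlow.js runtime the recognizer uses
import * as speechCommands from '@tensorflow-models/speech-commands';

// Placeholder: replace with the link from the Teachable Machine export panel.
const URL = 'https://teachablemachine.withgoogle.com/models/MODEL_ID/';

async function createModel() {
    const recognizer = speechCommands.create(
        'BROWSER_FFT',          // Fourier transform type used by Teachable Machine audio models
        undefined,              // speech commands vocabulary feature, unused for custom models
        URL + 'model.json',     // model topology
        URL + 'metadata.json'); // model metadata
    await recognizer.ensureModelLoaded();
    return recognizer;
}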

Sample snippet

<div>Teachable Machine Audio Model</div>
<button type='button' onclick='init()'>Start</button>
<div id='label-container'></div>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/[email protected]/dist/tf.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/[email protected]/dist/speech-commands.min.js"></script>

<script type="text/javascript">
    // more documentation available at
    // https://github.com/tensorflow/tfjs-models/tree/master/speech-commands

    // the link to your model provided by Teachable Machine export panel
    const URL = '{{URL}}';

    async function createModel() {
        const checkpointURL = URL + 'model.json'; // model topology
        const metadataURL = URL + 'metadata.json'; // model metadata

        const recognizer = speechCommands.create(
            'BROWSER_FFT', // Fourier transform type, not useful to change
            undefined, // speech commands vocabulary feature, not useful for your models
            checkpointURL,
            metadataURL);

        // check that model and metadata are loaded via HTTPS requests.
        await recognizer.ensureModelLoaded();

        return recognizer;
    }

    async function init() {
        const recognizer = await createModel();
        const classLabels = recognizer.wordLabels(); // get class labels
        const labelContainer = document.getElementById('label-container');
        for (let i = 0; i < classLabels.length; i++) {
            labelContainer.appendChild(document.createElement('div'));
        }

        // listen() takes two arguments:
        // 1. A callback function that is invoked anytime a word is recognized.
        // 2. A configuration object with adjustable fields
        recognizer.listen(result => {
            const scores = result.scores; // probability of prediction for each class
            // render the probability scores per class
            for (let i = 0; i < classLabels.length; i++) {
                const classPrediction = classLabels[i] + ': ' + scores[i].toFixed(2);
                labelContainer.childNodes[i].innerHTML = classPrediction;
            }
        }, {
            includeSpectrogram: true, // in case listen should return result.spectrogram
            probabilityThreshold: 0.75,
            invokeCallbackOnNoiseAndUnknown: true,
            overlapFactor: 0.50 // probably want between 0.5 and 0.75. More info in README
        });

        // Stop the recognition in 5 seconds.
        // setTimeout(() => recognizer.stopListening(), 5000);
    }
</script>

API

Please refer to the Speech Commands model documentation (https://github.com/tensorflow/tfjs-models/tree/master/speech-commands) for more details about the API.
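
For instance, besides listen(), the recognizer created in the sample above exposes isListening() and stopListening() for controlling the microphone stream; a brief sketch based on that documentation:

// Stop the microphone stream started by recognizer.listen(), if it is still active.
async function stopRecognizer(recognizer) {
    if (recognizer.isListening()) {
        await recognizer.stopListening();
    }
}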