ONNX Runtime

import * as ort from 'onnxruntime-web';

// Load the model and create an InferenceSession
const modelPath = 'path/to/your/onnx/model';
const session = await ort.InferenceSession.create(modelPath);

// Load and preprocess the input image into an ort.Tensor (inputTensor)
// ...

// Run inference; the feed key ('input' here) must match the model's
// input name, which is available via session.inputNames
const outputs = await session.run({ input: inputTensor });
console.log(outputs);
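For completeness, here is a minimal sketch of how the input tensor might be built from preprocessed pixel data and how the output can be read back. The float32 data type, the shape [1, 3, 224, 224], and the idea of taking the first input/output name are assumptions for illustration; the actual names, shape, and preprocessing depend on your model (check session.inputNames and session.outputNames).

// Minimal sketch: assumed dtype and shape; replace with your model's real preprocessing
const data = new Float32Array(1 * 3 * 224 * 224); // preprocessed image pixels go here
const inputTensor = new ort.Tensor('float32', data, [1, 3, 224, 224]);

// Feed the tensor using the model's first input name and read the first output
const results = await session.run({ [session.inputNames[0]]: inputTensor });
const output = results[session.outputNames[0]];
console.log(output.dims, output.data); // e.g. a Float32Array of scores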