I’m attempting to embed some text as vectors in the browser using rust, webassembly and wonnx.
When I try to run my model in the demo, I get these errors in the console:
wonnx_embeddings_repro_bg.js:820 Failed to load model: IR error: issue with data types: encountered parametrized dimensions 'unk__0'; this is not currently supported (this may be solved by running onnx-simplifier on the model first)
__wbg_error_fe807da27c4a4ced @ wonnx_embeddings_repro_bg.js:820
$func184 @ wonnx_embeddings_repro_bg.wasm?t=1721295649015:0x3f7a8
$func1067 @ wonnx_embeddings_repro_bg.wasm?t=1721295649015:0x219fe7
$func2910 @ wonnx_embeddings_repro_bg.wasm?t=1721295649015:0x29f7f3
$_dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hd5bdc993e1827b1c @ wonnx_embeddings_repro_bg.wasm?t=1721295649015:0x29f7e5
__wbg_adapter_33 @ wonnx_embeddings_repro_bg.js:227
real @ wonnx_embeddings_repro_bg.js:208
App.tsx:7 Uncaught (in promise) Failed to load embedder: Failed to load model: IR error: issue with data types: encountered parametrized dimensions 'unk__0'; this is not currently supported (this may be solved by running onnx-simplifier on the model first)
Example
// lib.rs
use std::{collections::HashMap, sync::Arc};
use wonnx::utils::{InputTensor, OutputTensor};
use web_sys::console;
use tokenizers::tokenizer::Tokenizer;
use wasm_bindgen::JsValue;
use wasm_bindgen::prelude::wasm_bindgen;
// Embedding dimensionality of all-MiniLM-L6-v2 (hidden size of the last layer).
const DIM: usize = 384;
// Model and tokenizer bytes are compiled directly into the wasm binary.
// NOTE(review): model_sim.onnx is presumably the onnx-simplifier output — confirm.
static MODEL_DATA: &'static [u8] = include_bytes!("../model/all-MiniLM-L6-v2/onnx/model_sim.onnx",);
static TOKENIZER_DATA: &'static [u8] = include_bytes!("../model/all-MiniLM-L6-v2/tokenizer.json",);
/// Mean-pools the final hidden layer over the sequence dimension, counting
/// only positions whose attention-mask entry is 1.
///
/// `last_hidden_layer` is a flattened row-major `[seq_len, DIM]` buffer;
/// `mask` holds one 0/1 entry per token position. Returns a `DIM`-length
/// vector. If no position is attended, a zero vector is returned instead of
/// dividing by zero (which would fill the result with NaNs).
fn average_pool(last_hidden_layer: &[f32], mask: &[i32]) -> Vec<f32> {
    let mut sum = vec![0.0f32; DIM];
    let mut kept: i32 = 0;
    // Zip rows with their mask entries instead of indexing `mask[idx]`,
    // so a length mismatch cannot panic; accumulate in place instead of
    // collecting a fresh Vec per row as the old fold did.
    for (row, _) in last_hidden_layer
        .chunks(DIM)
        .zip(mask.iter())
        .filter(|&(_, &m)| m == 1)
    {
        kept += 1;
        for (acc, &v) in sum.iter_mut().zip(row) {
            *acc += v;
        }
    }
    if kept == 0 {
        // Guard: empty/all-zero mask would otherwise produce 0.0 / 0 = NaN.
        return sum;
    }
    let denom = kept as f32;
    sum.into_iter().map(|e| e / denom).collect()
}
/// Pairs a wonnx inference session with the HuggingFace tokenizer so text
/// can be turned into sentence-embedding vectors in the browser.
pub struct Embedder {
// Arc so the session can be shared/cloned cheaply across async calls.
session: Arc<wonnx::Session>,
tokenizer: Tokenizer,
}
impl Embedder {
    /// Loads the embedded tokenizer and ONNX model, returning a ready-to-use
    /// `Embedder`. Failures are returned as human-readable strings and the
    /// model-load failure is additionally logged to the browser console.
    pub async fn load() -> Result<Embedder, String> {
        console::log_1(&"Starting to load tokenizer".into());
        let tokenizer = Tokenizer::from_bytes(TOKENIZER_DATA)
            .map_err(|e| format!("Failed to load tokenizer: {}", e))?;
        console::log_1(&"Tokenizer loaded successfully".into());
        console::log_1(&"Starting to load model".into());
        let session = wonnx::Session::from_bytes(MODEL_DATA)
            .await
            .map_err(|e| {
                let error_msg = format!("Failed to load model: {}", e);
                console::error_1(&error_msg.clone().into());
                error_msg
            })?;
        console::log_1(&"Model loaded successfully".into());
        Ok(Self {
            session: Arc::new(session),
            tokenizer,
        })
    }

    /// Tokenizes `txt`, runs the model, and mean-pools the final hidden
    /// state into a single embedding vector.
    pub async fn embed_query(&self, txt: String) -> Result<Vec<f32>, String> {
        // Was `unwrap()` — a tokenizer error would abort the wasm module
        // instead of surfacing through the Result this fn already returns.
        let encoding = self
            .tokenizer
            .encode(txt, true)
            .map_err(|e| format!("Failed to tokenize input: {}", e))?;
        // wonnx expects i32 tensors; the tokenizer emits u32 ids.
        let tokens: Vec<i32> = encoding.get_ids().iter().map(|&e| e as i32).collect();
        let token_type_ids: Vec<i32> = encoding
            .get_type_ids()
            .iter()
            .map(|&e| e as i32)
            .collect();
        let attention_mask: Vec<i32> = encoding
            .get_attention_mask()
            .iter()
            .map(|&e| e as i32)
            .collect();
        let mut input: HashMap<String, InputTensor> = HashMap::new();
        input.insert("input_ids".to_string(), tokens[..].into());
        input.insert("attention_mask".to_string(), attention_mask[..].into());
        input.insert("token_type_ids".to_string(), token_type_ids[..].into());
        // No need to clone the Arc just to invoke `run`; auto-deref suffices.
        // Was `unwrap()` — inference errors now propagate as strings.
        let output = self
            .session
            .run(&input)
            .await
            .map_err(|e| format!("Inference failed: {}", e))?;
        // `HashMap<String, _>::get` accepts `&str` via Borrow — no allocation,
        // and a missing output is an error rather than a panic.
        match output
            .get("last_hidden_state")
            .ok_or_else(|| "model produced no 'last_hidden_state' output".to_string())?
        {
            OutputTensor::F32(last_hidden_layer) => {
                Ok(average_pool(last_hidden_layer, &attention_mask))
            }
            _ => Err("can't have other type".to_string()),
        }
    }
}
/// Thin JS-facing wrapper around `Embedder` that converts string errors
/// into `JsValue`s for the wasm boundary.
pub struct EmbeddingService {
embedder: Embedder,
}
impl EmbeddingService {
    /// Constructs the service by loading the underlying embedder, turning
    /// any load failure into a `JsValue` error for the JS caller.
    pub async fn new() -> Result<EmbeddingService, JsValue> {
        match Embedder::load().await {
            Ok(embedder) => Ok(EmbeddingService { embedder }),
            Err(e) => Err(JsValue::from_str(&format!("Failed to load embedder: {}", e))),
        }
    }

    /// Embeds `text`, mapping embedder errors into `JsValue`s.
    pub async fn embed_text(&self, text: String) -> Result<Vec<f32>, JsValue> {
        let result = self.embedder.embed_query(text).await;
        result.map_err(|e| {
            let msg = format!("Failed to embed text: {}", e);
            JsValue::from_str(&msg)
        })
    }
}
/// Entry point exposed to JavaScript: builds the service and embeds a fixed
/// demo string, returning the embedding as a `Float32Array`.
///
/// NOTE(review): this reloads tokenizer + model on every call; cache the
/// `EmbeddingService` if `embed()` will be invoked repeatedly.
#[wasm_bindgen]
pub async fn embed() -> Result<js_sys::Float32Array, JsValue> {
    // The inner Results already carry JsValue errors, so the previous
    // `.map_err(|e| JsValue::from(e))` was an identity conversion — `?` alone suffices.
    let service = EmbeddingService::new().await?;
    let embedding = service.embed_text("Your text here".to_string()).await?;
    Ok(js_sys::Float32Array::from(&embedding[..]))
}
// usage in App.tsx
import * as wasm from "wonnx-embeddings-repro"
function App() {
  // Catch rejections from the wasm call: without this, a model-load failure
  // surfaces as "Uncaught (in promise) ..." instead of a handled error.
  async function run() {
    try {
      const result = await wasm.embed()
      console.log(result)
    } catch (err) {
      console.error("embed() failed:", err)
    }
  }
  return (
    <>
      <button onClick={() => run()}>Run wasm</button>
    </>
  )
}
export default App
Detailed Reproduction Repo:
- jacobhq/wonnx-embeddings-repro
- Output of `cargo tree`: https://gist.github.com/jacobhq/f20dfe14e5adf7a60843d29e5eccc6e2
I’ve tried:
- Running `onnxsim` (onnx-simplifier) on the model
- The Supabase/gte-small model, which I saw working in a similar setup in AmineDiro/docvec
I would appreciate it if anyone could explain the cause of this error, because the model has an ONNX badge on Hugging Face, which leads me to believe it should work. Alternatively, if anyone knows of an embedding model that does work with wonnx, that would be equally helpful.
2