I’m trying to deploy a small LLM on a mobile device (an Android app using Chaquopy) and am experimenting with both SmolLM and Qwen models. However, whenever I try to load a model, I get an error saying that the config.json file cannot be found. Here is the relevant Chaquopy configuration from my build.gradle:
python {
    version "3.8"
    pip {
        options "--only-binary=:all:"
        install "transformers==4.15.0"
        install "tokenizers==0.10.3"
        install "torch==1.8.1"
        install "numpy==1.19.5"
        options "--no-binary=pyyaml"
        install "pyyaml==6.0.2"
    }
}
plugins {
    id 'com.android.application'
    id 'kotlin-android'
    id 'com.chaquo.python'
}
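For context, the python block above sits inside the android → defaultConfig section of my module-level build.gradle, roughly like the sketch below (the exact nesting can differ between Chaquopy versions, so this is just how my project is laid out, not a template):

android {
    defaultConfig {
        python {
            version "3.8"
            pip {
                install "transformers==4.15.0"
                // ... remaining options/installs as listed above
            }
        }
    }
}

And this is the Python module that loads and runs the model: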
from transformers import AutoTokenizer, AutoModelForCausalLM

# Use Chaquopy's asset directory
# checkpoint = "app:model_files/Qwen2-0.5B-Instruct"
checkpoint = "model_files/SmolLM-135M"

tokenizer = None
model = None

def initialize_model():
    global tokenizer, model
    try:
        tokenizer = AutoTokenizer.from_pretrained(checkpoint, local_files_only=True)
        model = AutoModelForCausalLM.from_pretrained(checkpoint, local_files_only=True)
    except Exception as e:
        raise Exception(f"Error loading model: {str(e)}. Model path: {checkpoint}")

def interpret_text(text):
    try:
        if tokenizer is None or model is None:
            initialize_model()
        inputs = tokenizer.encode(text, return_tensors="pt")
        outputs = model.generate(inputs, max_length=50)
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        return f"Error in Python code: {str(e)}"
I have double-checked and confirmed that config.json is indeed in the model folder.
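In case it is useful, here is a small diagnostic I can drop into the same module to see what Python actually finds at that path at runtime (standard library only; checkpoint is the variable defined above, and the helper name is just something I made up):

import os

def debug_checkpoint_path():
    # Show where Python is running from and whether the model folder
    # and its config.json are visible at the relative path I'm using.
    print("working directory:", os.getcwd())
    print("checkpoint is a directory:", os.path.isdir(checkpoint))
    if os.path.isdir(checkpoint):
        print("checkpoint contents:", os.listdir(checkpoint))
        print("config.json present:", os.path.isfile(os.path.join(checkpoint, "config.json")))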
I suspect this might be related to the libraries I’m using, but I’m not sure how to proceed. Has anyone else encountered a similar issue, or does anyone have suggestions on how to resolve this?
Thanks in advance for your help!