I am working on an app where you can interact with an AI to ask questions about a PDF you have uploaded. The issue I'm facing is that whenever I interact with the AI from the client side, the following error comes up: 'TypeError: messagesRef.current.concat is not a function'. I am not sure where this issue is in my code, and I'm having a hard time figuring out what's causing it.
I've tried changing the line
initialMessages: data || [],
to
initialMessages: Array.isArray(data) ? data : [],
With this change the error no longer appears, but the messages generated by the AI, and the questions sent to it from the client, don't get saved to the database. I've added console.log statements throughout my code to surface any errors that would explain why nothing is saving, but as far as I can tell everything should be saving. I am using NeonDB and Drizzle ORM for the database.
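For example, one of the checks I added on the client logs the shape of the fetched data before useChat receives it (a rough sketch of my logging; the full component is below):

const { data, isLoading } = useQuery({
  queryKey: ["chat", chatId],
  queryFn: async () => {
    const response = await axios.post<Message[]>("/api/get-messages", {
      chatId,
    });
    // Debugging check: confirm whether the response body is actually
    // an array before it reaches useChat's initialMessages.
    console.log("get-messages response:", response.data);
    console.log("is array:", Array.isArray(response.data));
    return response.data;
  },
});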
ChatComponent.tsx:
"use client";
import React from "react";
import { Input } from "./ui/input";
import { useChat } from "ai/react";
import { Button } from "./ui/button";
import { Send } from "lucide-react";
import MessageList from "./MessageList";
import { useQuery } from "@tanstack/react-query";
import axios from "axios";
import { Message } from "ai";
type Props = { chatId: number };
const ChatComponent = ({ chatId }: Props) => {
const { data, isLoading } = useQuery({
queryKey: ["chat", chatId],
queryFn: async () => {
const response = await axios.post<Message[]>("/api/get-messages", {
chatId,
});
return response.data;
},
});
const { input, handleInputChange, handleSubmit, messages } = useChat({
api: "/api/chat",
body: {
chatId,
},
initialMessages: Array.isArray(data) ? data : [],
});
React.useEffect(() => {
const messageContainer = document.getElementById("message-container");
if (messageContainer) {
messageContainer.scrollTo({
top: messageContainer.scrollHeight,
behavior: "smooth",
});
}
}, [messages]);
return (
<div
className="relative max-h-screen overflow-scroll"
id="message-container"
>
{/* header */}
<div className="sticky top-0 inset-x-0 p-2 bg-white h-fit">
<h3 className="text-xl font-bold">Chat</h3>
</div>
{/* message list */}
<MessageList messages={messages} isLoading={isLoading} />
<form
onSubmit={handleSubmit}
className="sticky bottom-0 inset-x-0 px-2 py-4 bg-white"
>
<div className="flex">
<Input
value={input}
onChange={handleInputChange}
placeholder="Ask any question..."
className="w-full"
/>
<Button className="bg-blue-600 ml-2">
<Send className="h-4 w-4" />
</Button>
</div>
</form>
</div>
);
};
export default ChatComponent;
/api/chat:
import { Configuration, OpenAIApi } from "openai-edge";
import { Message, OpenAIStream, StreamingTextResponse } from "ai";
import { getContext } from "@/lib/context";
import { db } from "@/lib/db";
import { chats, messages as _messages } from "@/lib/db/schema";
import { eq } from "drizzle-orm";
import { NextResponse } from "next/server";
export const runtime = "edge";
const config = new Configuration({
apiKey: process.env.OPENAI_API_KEY,
});
const openai = new OpenAIApi(config);
export async function POST(req: Request) {
try {
const { messages, chatId } = await req.json();
const _chats = await db.select().from(chats).where(eq(chats.id, chatId));
if (_chats.length !== 1) {
return NextResponse.json({ error: "chat not found" }, { status: 404 });
}
const fileKey = _chats[0].fileKey;
const lastMessage = messages[messages.length - 1];
console.log("Last user message:", lastMessage.content);
const context = await getContext(lastMessage.content, fileKey);
const prompt = {
role: "system",
content: `AI assistant is a brand new, powerful, human-like artificial intelligence.
The traits of AI include expert knowledge, helpfulness, cleverness, and articulateness.
AI is a well-behaved and well-mannered individual.
AI is always friendly, kind, and inspiring, and he is eager to provide vivid and thoughtful responses to the user.
AI has the sum of all knowledge in their brain, and is able to accurately answer nearly any question about any topic in conversation.
AI assistant is a big fan of Pinecone and Vercel.
START CONTEXT BLOCK
${context}
END OF CONTEXT BLOCK
AI assistant will take into account any CONTEXT BLOCK that is provided in a conversation.
If the context does not provide the answer to the question, the AI assistant will say, "I'm sorry, but I don't know the answer to that question".
AI assistant will not apologize for previous responses, but instead will indicate that new information was gained.
AI assistant will not invent anything that is not drawn directly from the context.
`,
};
const response = await openai.createChatCompletion({
model: "gpt-3.5-turbo",
messages: [
prompt,
...messages.filter((message: Message) => message.role === "user"),
],
stream: true,
});
const stream = OpenAIStream(response, {
onStart: async () => {
try {
// save user message into db
await db.insert(_messages).values({
chatId,
content: lastMessage.content,
role: "user",
});
console.log("User message saved to db:", lastMessage.content); // Log user message insertion
} catch (error) {
console.error("Error saving user message to db:", error); // Log any errors during user message insertion
}
},
onCompletion: async (completion) => {
try {
// save ai message into db
await db.insert(_messages).values({
chatId,
content: completion,
role: "system",
});
console.log("AI message saved to db:", completion); // Log AI message insertion
} catch (error) {
console.error("Error saving AI message to db:", error); // Log any errors during AI message insertion
}
},
});
return new StreamingTextResponse(stream);
} catch (error) {
    console.error("Error processing message:", error);
    // Return an explicit error response so the route handler never resolves without one
    return NextResponse.json(
      { error: "Internal Server Error" },
      { status: 500 }
    );
  }
}
MessageList.tsx:
import { cn } from "@/lib/utils";
import { Message } from "ai/react";
import { Loader2 } from "lucide-react";
import React from "react";
type Props = {
isLoading: boolean;
messages: Message[];
};
const MessageList = ({ messages, isLoading }: Props) => {
if (isLoading) {
return (
<div className="absolute top-1/2 left-1/2 -translate-x-1/2 -translate-y-1/2">
<Loader2 className="w-6 h-6 animate-spin" />
</div>
);
}
if (!messages) return <></>;
return (
<div className="flex flex-col gap-2 px-4">
{messages.map((message) => {
return (
<div
key={message.id}
className={cn("flex", {
"justify-end pl-10": message.role === "user",
"justify-start pr-10": message.role === "assistant",
})}
>
<div
className={cn(
"rounded-lg mb-2 px-3 text-sm py-1 shadow-md ring-1 ring-gray-900/10",
{
"bg-blue-600 text-white": message.role === "user",
}
)}
>
<p>{message.content}</p>
</div>
</div>
);
})}
</div>
);
};
export default MessageList;
/api/get-messages:
import { db } from "@/lib/db"
import { messages } from "@/lib/db/schema"
import { eq } from "drizzle-orm"
import { NextResponse } from "next/server"
export const runtime = "edge"
export const POST = async (req: Request) => {
try {
const { chatId } = await req.json();
if (!chatId) {
return NextResponse.json({ error: "Chat ID is required" }, { status: 400 });
}
const _messages = await db.select().from(messages).where(eq(messages.chatId, chatId));
return NextResponse.json({ messages: _messages });
} catch (error) {
console.error("Error in /api/get-messages", error);
return NextResponse.json({ error: "Internal Server Error" }, { status: 500 });
}
};
schema.ts:
import {
integer,
pgEnum,
pgTable,
serial,
text,
timestamp,
varchar,
} from "drizzle-orm/pg-core";
export const userSystemEnum = pgEnum("user_system_enum", ["system", "user"]);
export const chats = pgTable("chats", {
id: serial("id").primaryKey(),
pdfName: text("pdf_name").notNull(),
pdfUrl: text("pdf_url").notNull(),
createdAt: timestamp("created_at").notNull().defaultNow(),
userId: varchar("user_id", { length: 256 }).notNull(),
fileKey: text("file_key").notNull(),
});
export type DrizzleChat = typeof chats.$inferSelect;
export const messages = pgTable("messages", {
id: serial("id").primaryKey(),
chatId: integer("chat_id")
.references(() => chats.id)
.notNull(),
content: text("content").notNull(),
createdAt: timestamp("created_at").notNull().defaultNow(),
role: userSystemEnum("role").notNull(),
});
export const userSubscriptions = pgTable("user_subscriptions", {
id: serial("id").primaryKey(),
userId: varchar("user_id", { length: 256 }).notNull().unique(),
stripeCustomerId: varchar("stripe_customer_id", { length: 256 })
.notNull()
.unique(),
stripeSubscriptionId: varchar("stripe_subscription_id", { length: 256 })
.notNull()
.unique(),
stripePriceId: varchar("stripe_price_id", { length: 256 }),
stripeCurrentPeriodEnd: timestamp("stripe_current_period_end"),
});
context.ts:
import { Pinecone } from "@pinecone-database/pinecone";
import { convertToAscii } from "./utils";
import { getEmbeddings } from "./embeddings";
export async function getMatchesFromEmbeddings(
embeddings: number[],
fileKey: string
) {
const apiKey = process.env.PINECONE_API_KEY;
const pinecone = new Pinecone({
apiKey: apiKey!,
});
const index = await pinecone.Index("papertalk");
try {
const namespace = convertToAscii(fileKey);
const queryResponse = await index.namespace(namespace).query({
vector: embeddings,
topK: 5,
includeMetadata: true,
});
return queryResponse.matches || [];
} catch (error) {
console.log("error querying embeddings", error);
throw error;
}
}
export async function getContext(query: string, fileKey: string) {
const queryEmbeddings = await getEmbeddings(query);
const matches = await getMatchesFromEmbeddings(queryEmbeddings, fileKey);
const qualifyingDocs = matches.filter(
(match) => match.score && match.score > 0.7
);
type Metadata = {
text: string;
pageNumber: number;
};
let docs = qualifyingDocs.map((match) => (match.metadata as Metadata).text);
// 5 vectors
return docs.join("n").substring(0, 3000);
}