import "dotenv/config";
|
|
process.env.OPENAI_API_KEY;
|
|
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
|
|
import { ChatPromptTemplate } from "@langchain/core/prompts";
|
|
import { MemoryVectorStore } from "langchain/vectorstores/memory";
|
|
import { createRetrievalChain } from "langchain/chains/retrieval";
|
|
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
|
|
|
|
import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio";
|
|
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
|
|
|
|
// The chat model picks up OPENAI_API_KEY from process.env, so no key is passed explicitly.
const chatModel = new ChatOpenAI({});

// Scrape the LangSmith documentation page to use as source material.
const loader = new CheerioWebBaseLoader("https://docs.smith.langchain.com/");
const docs = await loader.load();

// Split the page into smaller chunks that fit comfortably in the model's context window.
const splitter = new RecursiveCharacterTextSplitter();
const splitDocs = await splitter.splitDocuments(docs);

// Embed the chunks and index them in an in-memory vector store.
const embeddings = new OpenAIEmbeddings();
const vectorstore = await MemoryVectorStore.fromDocuments(splitDocs, embeddings);

console.log("vectorstore", vectorstore);
// Expose the vector store as a retriever so the chain can look up relevant chunks.
const retriever = vectorstore.asRetriever();

// Prompt that instructs the model to answer only from the retrieved context.
const prompt = ChatPromptTemplate.fromTemplate(
  `Answer the following question based only on the provided context:

<context>
{context}
</context>

Question: {input}`
);
// "Stuff" chain: insert all retrieved documents into the prompt and call the model.
const documentChain = await createStuffDocumentsChain({
  llm: chatModel,
  prompt,
});

// Retrieval chain: fetch relevant chunks for the input, then hand them to the document chain.
const retrievalChain = await createRetrievalChain({
  combineDocsChain: documentChain,
  retriever,
});
// Ask a question; the chain retrieves context from the indexed docs before answering.
const res = await retrievalChain.invoke({
  input: "What is LangSmith?",
});

console.log(res);
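
// Note: in current LangChain.js versions, the result returned by a createRetrievalChain
// invocation typically includes the original `input`, the retrieved `context` documents,
// and the generated `answer`, so the answer text alone can be printed with:
console.log(res.answer);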