
Commit 667e78c

📦 chore: Remove @langchain/community & Related Legacy Code (danny-avila#10375)
* chore: remove `@langchain/community` dependency
* refactor: remove SerpAPI integration and update related imports
* chore: remove legacy code with unnecessary dependencies
* chore: cleanup packages
* chore: cleanup packages
* chore: update openai dependency version to 5.10.1
* chore: add back @librechat/agents dependency
* chore: downgrade openai dependency from 5.10.1 to 5.8.2
* Remove dependency on @librechat/agents from the API package
* chore: add @librechat/agents dependency to the API package
* fix: add useLegacyContent property to RunAgent type and propagate it in createRun function (sketched below)
* chore: remove openai dependency version 5.10.1 from package.json
1 parent 8a4a5a4 commit 667e78c

28 files changed: 2,828 additions & 7,943 deletions
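The `useLegacyContent` fix from the commit message touches files beyond the three excerpted below. As a rough sketch of what adding and propagating such a flag can look like — only the property and function names come from the commit message; the shapes of `RunAgent` and the returned run object are assumptions, not the project's actual definitions:

```js
/**
 * Hypothetical sketch only: the real `RunAgent` type and `createRun` live in
 * the agents/API packages, which are not shown in this excerpt.
 *
 * @typedef {Object} RunAgent
 * @property {string} id
 * @property {string} provider
 * @property {boolean} [useLegacyContent] - Assumed meaning: keep the
 *   pre-migration (legacy) message-content format for this run.
 */

/**
 * Propagates the flag from the agent config onto the run, so downstream
 * consumers read one setting instead of re-deriving it per call site.
 * @param {{ agent: RunAgent, signal?: AbortSignal }} params
 */
function createRun({ agent, signal }) {
  return {
    agent,
    signal,
    useLegacyContent: agent.useLegacyContent === true,
  };
}

module.exports = { createRun };
```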

.env.example

Lines changed: 0 additions & 4 deletions
```diff
@@ -298,10 +298,6 @@ GOOGLE_CSE_ID=
 #-----------------
 YOUTUBE_API_KEY=
 
-# SerpAPI
-#-----------------
-SERPAPI_API_KEY=
-
 # Stable Diffusion
 #-----------------
 SD_WEBUI_URL=http://host.docker.internal:7860
```
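For context on the removal above: the key only existed to configure the SerpAPI tool pulled in through `@langchain/community`, which this commit deletes. A minimal sketch of that kind of wiring, assuming the community package's SerpAPI export; this is illustrative, not the exact deleted loader:

```js
// Sketch of the wiring this commit removes (assumed, not the exact code).
const { SerpAPI } = require('@langchain/community/tools/serpapi');

function loadSerpAPI() {
  // Without the env key, the tool simply was not offered to the model.
  if (!process.env.SERPAPI_API_KEY) {
    return null;
  }
  return new SerpAPI(process.env.SERPAPI_API_KEY, { hl: 'en', gl: 'us' });
}

module.exports = { loadSerpAPI };
```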

api/app/clients/OpenAIClient.js

Lines changed: 1 addition & 291 deletions
```diff
@@ -21,27 +21,17 @@ const {
   KnownEndpoints,
   openAISettings,
   ImageDetailCost,
-  CohereConstants,
   getResponseSender,
   validateVisionModel,
   mapModelToAzureConfig,
 } = require('librechat-data-provider');
-const {
-  truncateText,
-  formatMessage,
-  CUT_OFF_PROMPT,
-  titleInstruction,
-  createContextHandlers,
-} = require('./prompts');
 const { encodeAndFormat } = require('~/server/services/Files/images/encode');
+const { formatMessage, createContextHandlers } = require('./prompts');
 const { spendTokens } = require('~/models/spendTokens');
 const { addSpaceIfNeeded } = require('~/server/utils');
 const { handleOpenAIErrors } = require('./tools/util');
 const { OllamaClient } = require('./OllamaClient');
-const { summaryBuffer } = require('./memory');
-const { runTitleChain } = require('./chains');
 const { extractBaseURL } = require('~/utils');
-const { tokenSplit } = require('./document');
 const BaseClient = require('./BaseClient');
 
 class OpenAIClient extends BaseClient {
```
```diff
@@ -617,168 +607,6 @@ class OpenAIClient extends BaseClient {
     throw new Error('Deprecated');
   }
 
-  /**
-   * Generates a concise title for a conversation based on the user's input text and response.
-   * Uses either specified method or starts with the OpenAI `functions` method (using LangChain).
-   * If the `functions` method fails, it falls back to the `completion` method,
-   * which involves sending a chat completion request with specific instructions for title generation.
-   *
-   * @param {Object} params - The parameters for the conversation title generation.
-   * @param {string} params.text - The user's input.
-   * @param {string} [params.conversationId] - The current conversationId, if not already defined on client initialization.
-   * @param {string} [params.responseText=''] - The AI's immediate response to the user.
-   *
-   * @returns {Promise<string | 'New Chat'>} A promise that resolves to the generated conversation title.
-   * In case of failure, it will return the default title, "New Chat".
-   */
-  async titleConvo({ text, conversationId, responseText = '' }) {
-    const appConfig = this.options.req?.config;
-    this.conversationId = conversationId;
-
-    if (this.options.attachments) {
-      delete this.options.attachments;
-    }
-
-    let title = 'New Chat';
-    const convo = `||>User:
-"${truncateText(text)}"
-||>Response:
-"${JSON.stringify(truncateText(responseText))}"`;
-
-    const { OPENAI_TITLE_MODEL } = process.env ?? {};
-
-    let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? openAISettings.model.default;
-    if (model === Constants.CURRENT_MODEL) {
-      model = this.modelOptions.model;
-    }
-
-    const modelOptions = {
-      // TODO: remove the gpt fallback and make it specific to endpoint
-      model,
-      temperature: 0.2,
-      presence_penalty: 0,
-      frequency_penalty: 0,
-      max_tokens: 16,
-    };
-
-    const azureConfig = appConfig?.endpoints?.[EModelEndpoint.azureOpenAI];
-
-    const resetTitleOptions = !!(
-      (this.azure && azureConfig) ||
-      (azureConfig && this.options.endpoint === EModelEndpoint.azureOpenAI)
-    );
-
-    if (resetTitleOptions) {
-      const { modelGroupMap, groupMap } = azureConfig;
-      const {
-        azureOptions,
-        baseURL,
-        headers = {},
-        serverless,
-      } = mapModelToAzureConfig({
-        modelName: modelOptions.model,
-        modelGroupMap,
-        groupMap,
-      });
-
-      this.options.headers = resolveHeaders({ headers });
-      this.options.reverseProxyUrl = baseURL ?? null;
-      this.langchainProxy = extractBaseURL(this.options.reverseProxyUrl);
-      this.apiKey = azureOptions.azureOpenAIApiKey;
-
-      const groupName = modelGroupMap[modelOptions.model].group;
-      this.options.addParams = azureConfig.groupMap[groupName].addParams;
-      this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
-      this.options.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
-      this.azure = !serverless && azureOptions;
-      if (serverless === true) {
-        this.options.defaultQuery = azureOptions.azureOpenAIApiVersion
-          ? { 'api-version': azureOptions.azureOpenAIApiVersion }
-          : undefined;
-        this.options.headers['api-key'] = this.apiKey;
-      }
-    }
-
-    const titleChatCompletion = async () => {
-      try {
-        modelOptions.model = model;
-
-        if (this.azure) {
-          modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL ?? modelOptions.model;
-          this.azureEndpoint = genAzureChatCompletion(this.azure, modelOptions.model, this);
-        }
-
-        const instructionsPayload = [
-          {
-            role: this.options.titleMessageRole ?? (this.isOllama ? 'user' : 'system'),
-            content: `Please generate ${titleInstruction}
-
-${convo}
-
-||>Title:`,
-          },
-        ];
-
-        const promptTokens = this.getTokenCountForMessage(instructionsPayload[0]);
-
-        let useChatCompletion = true;
-
-        if (this.options.reverseProxyUrl === CohereConstants.API_URL) {
-          useChatCompletion = false;
-        }
-
-        title = (
-          await this.sendPayload(instructionsPayload, {
-            modelOptions,
-            useChatCompletion,
-            context: 'title',
-          })
-        ).replaceAll('"', '');
-
-        const completionTokens = this.getTokenCount(title);
-
-        await this.recordTokenUsage({ promptTokens, completionTokens, context: 'title' });
-      } catch (e) {
-        logger.error(
-          '[OpenAIClient] There was an issue generating the title with the completion method',
-          e,
-        );
-      }
-    };
-
-    if (this.options.titleMethod === 'completion') {
-      await titleChatCompletion();
-      logger.debug('[OpenAIClient] Convo Title: ' + title);
-      return title;
-    }
-
-    try {
-      this.abortController = new AbortController();
-      const llm = this.initializeLLM({
-        ...modelOptions,
-        conversationId,
-        context: 'title',
-        tokenBuffer: 150,
-      });
-
-      title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal });
-    } catch (e) {
-      if (e?.message?.toLowerCase()?.includes('abort')) {
-        logger.debug('[OpenAIClient] Aborted title generation');
-        return;
-      }
-      logger.error(
-        '[OpenAIClient] There was an issue generating title with LangChain, trying completion method...',
-        e,
-      );
-
-      await titleChatCompletion();
-    }
-
-    logger.debug('[OpenAIClient] Convo Title: ' + title);
-    return title;
-  }
-
   /**
    * Get stream usage as returned by this client's API response.
    * @returns {OpenAIUsageMetadata} The stream usage object.
```
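The deleted `titleConvo` tried LangChain's `runTitleChain` first and only fell back to a plain chat completion (`titleChatCompletion`). A minimal sketch of the completion-only pattern that makes the chain dependency unnecessary, assuming the official `openai` SDK; the model fallback and prompt wording here are illustrative, not LibreChat's exact replacement:

```js
const OpenAI = require('openai');

/**
 * Minimal completion-based title generator, sketching the fallback path the
 * deleted code called `titleChatCompletion`. Model choice and prompt wording
 * are assumptions for illustration.
 */
async function generateTitle({ text, responseText = '' }) {
  const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
  try {
    const res = await client.chat.completions.create({
      model: process.env.OPENAI_TITLE_MODEL ?? 'gpt-4o-mini',
      temperature: 0.2,
      max_tokens: 16,
      messages: [
        {
          role: 'system',
          content: `Write a concise title (5 words or fewer) for this exchange:\nUser: "${text}"\nResponse: "${responseText}"`,
        },
      ],
    });
    // Strip stray quotes, mirroring the deleted code's replaceAll('"', '').
    return res.choices[0]?.message?.content?.replaceAll('"', '') || 'New Chat';
  } catch {
    return 'New Chat'; // same default the deleted method used on failure
  }
}
```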
```diff
@@ -833,124 +661,6 @@ ${convo}
     return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate;
   }
 
-  async summarizeMessages({ messagesToRefine, remainingContextTokens }) {
-    logger.debug('[OpenAIClient] Summarizing messages...');
-    let context = messagesToRefine;
-    let prompt;
-
-    // TODO: remove the gpt fallback and make it specific to endpoint
-    const { OPENAI_SUMMARY_MODEL = openAISettings.model.default } = process.env ?? {};
-    let model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL;
-    if (model === Constants.CURRENT_MODEL) {
-      model = this.modelOptions.model;
-    }
-
-    const maxContextTokens =
-      getModelMaxTokens(
-        model,
-        this.options.endpointType ?? this.options.endpoint,
-        this.options.endpointTokenConfig,
-      ) ?? 4095; // 1 less than maximum
-
-    // 3 tokens for the assistant label, and 98 for the summarizer prompt (101)
-    let promptBuffer = 101;
-
-    /*
-     * Note: token counting here is to block summarization if it exceeds the spend; complete
-     * accuracy is not important. Actual spend will happen after successful summarization.
-     */
-    const excessTokenCount = context.reduce(
-      (acc, message) => acc + message.tokenCount,
-      promptBuffer,
-    );
-
-    if (excessTokenCount > maxContextTokens) {
-      ({ context } = await this.getMessagesWithinTokenLimit({
-        messages: context,
-        maxContextTokens,
-      }));
-    }
-
-    if (context.length === 0) {
-      logger.debug(
-        '[OpenAIClient] Summary context is empty, using latest message within token limit',
-      );
-
-      promptBuffer = 32;
-      const { text, ...latestMessage } = messagesToRefine[messagesToRefine.length - 1];
-      const splitText = await tokenSplit({
-        text,
-        chunkSize: Math.floor((maxContextTokens - promptBuffer) / 3),
-      });
-
-      const newText = `${splitText[0]}\n...[truncated]...\n${splitText[splitText.length - 1]}`;
-      prompt = CUT_OFF_PROMPT;
-
-      context = [
-        formatMessage({
-          message: {
-            ...latestMessage,
-            text: newText,
-          },
-          userName: this.options?.name,
-          assistantName: this.options?.chatGptLabel,
-        }),
-      ];
-    }
-    // TODO: We can accurately count the tokens here before handleChatModelStart
-    // by recreating the summary prompt (single message) to avoid LangChain handling
-
-    const initialPromptTokens = this.maxContextTokens - remainingContextTokens;
-    logger.debug('[OpenAIClient] initialPromptTokens', initialPromptTokens);
-
-    const llm = this.initializeLLM({
-      model,
-      temperature: 0.2,
-      context: 'summary',
-      tokenBuffer: initialPromptTokens,
-    });
-
-    try {
-      const summaryMessage = await summaryBuffer({
-        llm,
-        debug: this.options.debug,
-        prompt,
-        context,
-        formatOptions: {
-          userName: this.options?.name,
-          assistantName: this.options?.chatGptLabel ?? this.options?.modelLabel,
-        },
-        previous_summary: this.previous_summary?.summary,
-        signal: this.abortController.signal,
-      });
-
-      const summaryTokenCount = this.getTokenCountForMessage(summaryMessage);
-
-      if (this.options.debug) {
-        logger.debug('[OpenAIClient] summaryTokenCount', summaryTokenCount);
-        logger.debug(
-          `[OpenAIClient] Summarization complete: remainingContextTokens: ${remainingContextTokens}, after refining: ${
-            remainingContextTokens - summaryTokenCount
-          }`,
-        );
-      }
-
-      return { summaryMessage, summaryTokenCount };
-    } catch (e) {
-      if (e?.message?.toLowerCase()?.includes('abort')) {
-        logger.debug('[OpenAIClient] Aborted summarization');
-        const { run, runId } = this.runManager.getRunByConversationId(this.conversationId);
-        if (run && run.error) {
-          const { error } = run;
-          this.runManager.removeRun(runId);
-          throw new Error(error);
-        }
-      }
-      logger.error('[OpenAIClient] Error summarizing messages', e);
-      return {};
-    }
-  }
-
   /**
    * @param {object} params
    * @param {number} params.promptTokens
```
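With `summaryBuffer` and `tokenSplit` gone, context management rests on plain token-budget trimming; the deleted code already leaned on `getMessagesWithinTokenLimit` (defined elsewhere in the client) for its pre-check. A minimal sketch of that trimming pattern, with `countTokens` as a stand-in for the client's real tokenizer; the body is an assumption, not the project's implementation:

```js
/**
 * Keep the newest messages that fit in the token budget, dropping the oldest
 * first. Sketch only: the shapes mirror how the deleted code called
 * `getMessagesWithinTokenLimit`, but the body here is assumed.
 *
 * @param {{ messages: Array<{ text: string, tokenCount?: number }>,
 *           maxContextTokens: number,
 *           countTokens: (text: string) => number }} params
 */
function getMessagesWithinTokenLimit({ messages, maxContextTokens, countTokens }) {
  const context = [];
  let remaining = maxContextTokens;

  // Walk newest-to-oldest so recent turns always win the budget.
  for (let i = messages.length - 1; i >= 0; i--) {
    const cost = messages[i].tokenCount ?? countTokens(messages[i].text);
    if (cost > remaining) {
      break;
    }
    remaining -= cost;
    context.unshift(messages[i]);
  }

  return { context, remainingContextTokens: remaining };
}
```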

api/app/clients/agents/CustomAgent/CustomAgent.js

Lines changed: 0 additions & 50 deletions
This file was deleted.
