Skip to main content

Class: LLMUtilityAI

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:65

Interface

IUtilityAI — Defines the contract for a comprehensive Utility AI service.

Implements

IUtilityAI
IPromptEngineUtilityAI

Constructors

Constructor

new LLMUtilityAI(utilityId?): LLMUtilityAI

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:72

Parameters

utilityId?

string

Returns

LLMUtilityAI

Properties

utilityId

readonly utilityId: string

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:66

Implementation of

IUtilityAI.utilityId

Methods

analyzeSentiment()

analyzeSentiment(text, options?): Promise<SentimentResult>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:309

Parameters

text

string

options?

SentimentAnalysisOptions

Returns

Promise<SentimentResult>

Implementation of

IUtilityAI.analyzeSentiment


calculateReadability()

calculateReadability(text, options): Promise<ReadabilityResult>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:465

Parameters

text

string

options

ReadabilityOptions

Returns

Promise<ReadabilityResult>

Implementation of

IUtilityAI.calculateReadability


calculateSimilarity()

calculateSimilarity(text1, text2, options?): Promise<number>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:395

Parameters

text1

string

text2

string

options?

SimilarityOptions

Returns

Promise<number>

Implementation of

IUtilityAI.calculateSimilarity


checkHealth()

checkHealth(): Promise<{ dependencies?: object[]; details?: any; isHealthy: boolean; }>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:590

Returns

Promise<{ dependencies?: object[]; details?: any; isHealthy: boolean; }>

Implementation of

IUtilityAI.checkHealth


classifyText()

classifyText(textToClassify, options): Promise<ClassificationResult>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:257

Parameters

textToClassify

string

options

ClassificationOptions

Returns

Promise<ClassificationResult>

Implementation of

IUtilityAI.classifyText


detectLanguage()

detectLanguage(text, options?): Promise<LanguageDetectionResult[]>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:337

Parameters

text

string

options?

LanguageDetectionOptions

Returns

Promise<LanguageDetectionResult[]>

Implementation of

IUtilityAI.detectLanguage


extractKeywords()

extractKeywords(textToAnalyze, options?): Promise<string[]>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:287

Parameters

textToAnalyze

string

options?

KeywordExtractionOptions

Returns

Promise<string[]>

Implementation of

IUtilityAI.extractKeywords


generateNGrams()

generateNGrams(tokens, options): Promise<Record<number, string[][]>>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:445

Parameters

tokens

string[]

options

NGramOptions

Returns

Promise<Record<number, string[][]>>

Implementation of

IUtilityAI.generateNGrams


initialize()

initialize(config): Promise<void>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:77

Parameters

config

LLMUtilityAIConfig

Returns

Promise<void>

Implementation of

IUtilityAI.initialize


loadTrainedModel()

loadTrainedModel(): Promise<{ message?: string; success: boolean; }>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:618

Returns

Promise<{ message?: string; success: boolean; }>

Implementation of

IUtilityAI.loadTrainedModel


normalizeText()

normalizeText(text, options?): Promise<string>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:421

Parameters

text

string

options?

TextNormalizationOptions

Returns

Promise<string>

Implementation of

IUtilityAI.normalizeText


parseJsonSafe()

parseJsonSafe<T>(jsonString, options?): Promise<T | null>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:204

Safely parses a string that is expected to be JSON, potentially using an LLM to fix common issues.

Type Parameters

T

T = any

The expected type of the parsed JSON object.

Parameters

jsonString

string

The string to parse.

options?

ParseJsonOptions<T>

Options for parsing and fixing.

Returns

Promise<T | null>

The parsed object, or null if parsing and fixing fail.

Implementation of

IUtilityAI.parseJsonSafe


saveTrainedModel()

saveTrainedModel(): Promise<{ message?: string; pathOrStoreId?: string; success: boolean; }>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:615

Returns

Promise<{ message?: string; pathOrStoreId?: string; success: boolean; }>

Implementation of

IUtilityAI.saveTrainedModel


shutdown()

shutdown(): Promise<void>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:622

Returns

Promise<void>

Implementation of

IUtilityAI.shutdown


stemTokens()

stemTokens(tokens, _options?): Promise<string[]>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:389

Parameters

tokens

string[]

_options?

StemmingOptions

Returns

Promise<string[]>

Implementation of

IUtilityAI.stemTokens


summarize()

summarize(textToSummarize, options?): Promise<string>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:177

Parameters

textToSummarize

string

options?

SummarizationOptions

Returns

Promise<string>

Implementation of

IUtilityAI.summarize


summarizeConversationHistory()

summarizeConversationHistory(messages, targetTokenCount, modelInfo, preserveImportantMessages?): Promise<{ finalTokenCount: number; messagesSummarized: number; originalTokenCount: number; summaryMessages: ConversationMessage[]; }>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:516

Summarizes conversation history to fit within token constraints, attempting to preserve key information.

Parameters

messages

readonly ConversationMessage[]

The array of conversation messages to summarize.

targetTokenCount

number

The desired maximum token count for the summary.

modelInfo

Readonly<ModelTargetInfo>

Information about the model for which the summary is being prepared.

preserveImportantMessages?

boolean

If true, attempt to identify and keep important messages verbatim.

Returns

Promise<{ finalTokenCount: number; messagesSummarized: number; originalTokenCount: number; summaryMessages: ConversationMessage[]; }>

A summary (which might be a single system message or a condensed list of messages), and metadata about the summarization.

Implementation of

IPromptEngineUtilityAI.summarizeConversationHistory


summarizeRAGContext()

summarizeRAGContext(context, targetTokenCount, modelInfo, preserveSourceAttribution?): Promise<{ finalTokenCount: number; originalTokenCount: number; preservedSources?: string[]; summary: string; }>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:561

Summarizes retrieved RAG context to fit token limits, ideally preserving source attribution if possible.

Parameters

context

string | readonly object[]

The RAG context to summarize.

targetTokenCount

number

The desired maximum token count for the summarized context.

modelInfo

Readonly<ModelTargetInfo>

Information about the model.

preserveSourceAttribution?

boolean

If true, attempt to retain source information in the summary.

Returns

Promise<{ finalTokenCount: number; originalTokenCount: number; preservedSources?: string[]; summary: string; }>

The summarized text and metadata.

Implementation of

IPromptEngineUtilityAI.summarizeRAGContext


tokenize()

tokenize(text, options?): Promise<string[]>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:360

Parameters

text

string

options?

TokenizationOptions

Returns

Promise<string[]>

Implementation of

IUtilityAI.tokenize


trainModel()

trainModel(): Promise<{ message?: string; modelId?: string; success: boolean; }>

Defined in: packages/agentos/src/nlp/ai_utilities/LLMUtilityAI.ts:612

Returns

Promise<{ message?: string; modelId?: string; success: boolean; }>

Implementation of

IUtilityAI.trainModel