// Wrapper around a local OpenAI-compatible endpoint (e.g. Ollama).
import OpenAIApi from 'openai' ;
import axios from 'axios' ;
import { readFileSync } from 'fs' ;
// Settings for the local endpoint (url, api_key, embedding_model), read once
// at module load. Path is resolved against the process cwd. `const` because
// the binding itself is never reassigned (only its properties are mutated).
const localSettings = JSON.parse(readFileSync('./local-config.json', 'utf8'));
/**
 * Extracts the text between the first '[' and the first ']' in a string.
 *
 * @param {string} str - Input such as "ollama[mistral:instruct]".
 * @returns {string} The bracketed content, or '' when either bracket is
 *   missing or the first ']' appears before the first '['.
 */
function getContentInBrackets(str) {
    const open = str.indexOf('[');
    const close = str.indexOf(']');
    const hasPair = open !== -1 && close !== -1 && close > open;
    return hasPair ? str.slice(open + 1, close) : '';
}
/**
 * Chat + embedding client for a local OpenAI-compatible endpoint
 * (e.g. Ollama). The model is selected with the "ollama[model]" syntax;
 * the bracketed part becomes the model name sent to the endpoint.
 */
export class Local {
    /**
     * @param {string} model_name - Raw spec, e.g. "ollama[mistral:instruct]".
     * @throws {Error} When no model name is found inside the brackets.
     */
    constructor(model_name) {
        this.model_name = getContentInBrackets(model_name);
        let localConfig = null;
        // Strip a trailing "/v1" (first occurrence only) so the suffix can be
        // re-appended consistently for the OpenAI client's baseURL below.
        localSettings["url"] = localSettings["url"].replace("/v1", "");

        if (this.model_name == "") {
            throw new Error('Model is not specified! Please ensure you input the model in the following format: ollama[model]. For example, for Mistral instruct, use: ollama[mistral:instruct]');
        }

        // Probe the endpoint before creating the client.
        // NOTE: this is asynchronous — this.openai stays undefined until the
        // GET resolves; callers that invoke sendRequest() too early will fail.
        // BUG FIX: the original chain had no .catch(), so an unreachable
        // endpoint (or the throw below) became an unhandled promise rejection
        // that can terminate the Node process.
        axios.get(localSettings["url"])
            .then(response => {
                if (response.status === 200) {
                    localConfig = {
                        baseURL: `${localSettings["url"]}/v1`,
                        apiKey: localSettings["api_key"],
                    };
                    this.openai = new OpenAIApi(localConfig);
                } else {
                    throw new Error(`Error relating the endpoint: ${response.status}.`);
                }
            })
            .catch(err => {
                console.error('Failed to reach local endpoint:', err.message);
            });
    }

    /**
     * Sends a chat-completion request, retrying with a shortened history when
     * the context window is exceeded.
     *
     * @param {Array<object>} turns - Conversation turns ({role, content}).
     * @param {string} systemMessage - Prepended as the system role message.
     * @param {string} [stop_seq='***'] - Stop sequence for generation.
     * @returns {Promise<string>} Model reply, or a fallback message on error.
     */
    async sendRequest(turns, systemMessage, stop_seq = '***') {
        let messages = [{ 'role': 'system', 'content': systemMessage }].concat(turns);
        let res = null;
        try {
            console.log(`Awaiting local response... (model: ${this.model_name})`);
            console.log('Messages:', messages);
            let completion = await this.openai.chat.completions.create({
                model: this.model_name,
                messages: messages,
                stop: stop_seq,
            });
            // finish_reason 'length' means the reply was truncated; treat it
            // like a context overflow so the retry path below can engage.
            if (completion.choices[0].finish_reason == 'length')
                throw new Error('Context length exceeded');
            console.log('Received.');
            res = completion.choices[0].message.content;
        }
        catch (err) {
            if ((err.message == 'Context length exceeded' || err.code == 'context_length_exceeded') && turns.length > 1) {
                console.log('Context length exceeded, trying again with shorter context.');
                // BUG FIX: original called bare `sendRequest(...)`, which is a
                // ReferenceError inside a class method — must go through `this`.
                return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
            } else {
                console.log(err);
                res = 'My brain disconnected, try again.';
            }
        }
        return res;
    }

    /**
     * Embeds text via the configured embedding model.
     *
     * @param {string} text - Text to embed.
     * @returns {Promise<number[]>} Embedding vector; on failure, a 1-element
     *   random vector is returned as a degraded fallback.
     */
    async embed(text) {
        try {
            if (localSettings["api_key"] == "ollama") { //Embedding if it is Ollama (temporary)
                // Ollama's native embeddings route differs from the OpenAI
                // /v1 API, so call it directly.
                const response = await axios.post(`${localSettings["url"]}/api/embeddings`, {
                    model: localSettings["embedding_model"],
                    prompt: text
                });
                return response.data.embedding;
            }
            const embedding = await this.openai.embeddings.create({
                model: localSettings["embedding_model"],
                input: text,
                encoding_format: "float",
            });
            return embedding.data[0].embedding;
        } catch (error) {
            console.log('Error embedding text:', error.response ? error.response.data : error.message);
            return Array(1).fill().map(() => Math.random());
        }
    }
}