import { GoogleGenerativeAI } from '@google/generative-ai';

export class Gemini {
    constructor(model_name, url) {
        this.model_name = model_name;
        this.url = url;

        if (!process.env.GEMINI_API_KEY) {
            throw new Error('Gemini API key missing! Make sure you set your GEMINI_API_KEY environment variable.');
        }
        this.genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
    }

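    // Flattens the system message and conversation turns into a single
    // role-tagged text prompt and returns the model's text reply.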
    async sendRequest(turns, systemMessage) {
        // Use the custom endpoint if one was provided, otherwise the default Gemini API.
        let model;
        if (this.url) {
            model = this.genAI.getGenerativeModel(
                { model: this.model_name || "gemini-pro" },
                { baseUrl: this.url }
            );
        } else {
            model = this.genAI.getGenerativeModel(
                { model: this.model_name || "gemini-pro" }
            );
        }

        const messages = [{'role': 'system', 'content': systemMessage}].concat(turns);
        let prompt = "";
        let role = "";
        messages.forEach((message) => {
            role = message.role;
            if (role === 'assistant') role = 'model';
            prompt += `${role}: ${message.content}\n`;
        });
        // If the last message was from the user/system, add a prompt for the model;
        // otherwise, pretend we are extending the model's own message.
        if (role !== "model")
            prompt += "model: ";
        console.log(prompt);
        const result = await model.generateContent(prompt);
        const response = await result.response;
        return response.text();
    }

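    // Returns an embedding vector for the given text.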
    async embed(text) {
        // Use the custom endpoint if one was provided, otherwise the default Gemini API.
        let model;
        if (this.url) {
            model = this.genAI.getGenerativeModel(
                { model: this.model_name || "embedding-001" },
                { baseUrl: this.url }
            );
        } else {
            model = this.genAI.getGenerativeModel(
                { model: this.model_name || "embedding-001" }
            );
        }
        const result = await model.embedContent(text);
        return result.embedding;
    }
}
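
// Example usage (illustrative sketch, not part of this module; assumes
// GEMINI_API_KEY is set in the environment, the calls run inside an async
// function, and turns are { role, content } objects as sendRequest expects):
//
//   const gemini = new Gemini(); // defaults: "gemini-pro" for chat, "embedding-001" for embeddings
//   const reply = await gemini.sendRequest(
//       [{ role: 'user', content: 'Hello!' }],
//       'You are a helpful assistant.'
//   );
//   const embedding = await gemini.embed('text to embed');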