Mirror of https://github.com/kolbytn/mindcraft.git, synced 2025-08-16 12:15:36 +02:00
Add files via upload

Uploads changes to mindcraft that allow Gemini thinking models to be used, plus support for the glhf.chat and Hyperbolic APIs.
parent afe43c000e
commit d0d1e45c8c

8 changed files with 237 additions and 9 deletions
@@ -7,5 +7,7 @@
     "GROQCLOUD_API_KEY": "",
     "HUGGINGFACE_API_KEY": "",
     "QWEN_API_KEY": "",
-    "XAI_API_KEY": ""
+    "XAI_API_KEY": "",
+    "GHLF_API_KEY": "",
+    "HYPERBOLIC_API_KEY": ""
 }
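Both new keys are read at runtime through the repo's getKey helper (used by the new model wrappers later in this diff); a minimal sketch, assuming keys.json sits at the project root:

    // minimal sketch: how the new keys are consumed by the wrappers added below
    import { getKey } from './src/utils/keys.js';

    const glhfKey = getKey('GHLF_API_KEY');             // note: this commit spells the key GHLF, not GLHF
    const hyperbolicKey = getKey('HYPERBOLIC_API_KEY');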
profiles/GLFH.json (new file, 5 lines)
@@ -0,0 +1,5 @@
+{
+    "name": "Good_luck_have_fun",
+
+    "model": "hf:meta-llama/Llama-3.1-405B-Instruct"
+}
profiles/hyperbolic.json (new file, 6 lines)
@@ -0,0 +1,6 @@
+{
+    "name": "Hyperbolic",
+
+    "model": "hb:deepseek-ai/DeepSeek-V3"
+
+}
@@ -18,6 +18,8 @@ export default
         // "./profiles/llama.json",
         // "./profiles/qwen.json",
         // "./profiles/grok.json",
+        // "./profiles/GLHF.json",
+        // "./profiles/hyperbolic.json"
 
         // using more than 1 profile requires you to /msg each bot indivually
     ],
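To actually run one of the new providers, uncomment its entry; a minimal sketch, assuming this array is the profiles list of the settings.js export. Note that the entry here says GLHF.json while the file added above is named GLFH.json, so one of the two spellings has to be corrected before that profile will load:

    // settings.js sketch: enable the Hyperbolic profile added by this commit
    "profiles": [
        "./profiles/hyperbolic.json"
    ],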
@@ -13,12 +13,16 @@ import { Local } from '../models/local.js';
 import { Novita } from '../models/novita.js';
 import { GroqCloudAPI } from '../models/groq.js';
 import { HuggingFace } from '../models/huggingface.js';
+import { glhf } from '../models/glhf.js';
+import { hyperbolic } from '../models/hyperbolic.js';
 import { Qwen } from "../models/qwen.js";
 import { Grok } from "../models/grok.js";
 
 
 export class Prompter {
-    constructor(agent, fp) {
+    constructor(agent, fp, agentName) {
         this.agent = agent;
+        this.agentName = agentName;
+
         this.profile = JSON.parse(readFileSync(fp, 'utf8'));
         this.default_profile = JSON.parse(readFileSync('./profiles/_default.json', 'utf8'));
@@ -50,14 +54,18 @@ export class Prompter {
             chat.api = 'anthropic';
         else if (chat.model.includes('huggingface/'))
             chat.api = "huggingface";
+        else if (chat.model.includes('hf:'))
+            chat.api = "glhf";
+        else if (chat.model.includes('hyperbolic:') || chat.model.includes('hb:'))
+            chat.api = "hyperbolic";
         else if (chat.model.includes('meta/') || chat.model.includes('mistralai/') || chat.model.includes('replicate/'))
             chat.api = 'replicate';
         else if (chat.model.includes("groq/") || chat.model.includes("groqcloud/"))
             chat.api = 'groq';
         else if (chat.model.includes('novita/'))
             chat.api = 'novita';
         else if (chat.model.includes('qwen'))
             chat.api = 'qwen';
         else if (chat.model.includes('grok'))
             chat.api = 'xai';
         else
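Routing is a plain substring match on the model string, so the prefix in each profile picks the backend; a quick sketch of how the strings used by the new profiles resolve under the chain above:

    // sketch: which api each new prefix resolves to under the chain above
    const examples = {
        'hf:meta-llama/Llama-3.1-405B-Instruct': 'glhf',
        'hb:deepseek-ai/DeepSeek-V3': 'hyperbolic',
        'hyperbolic:deepseek-ai/DeepSeek-V3': 'hyperbolic',
    };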
@@ -81,6 +89,11 @@ export class Prompter {
         }
         else if (chat.api === 'huggingface')
             this.chat_model = new HuggingFace(chat.model, chat.url);
+        else if (chat.api === 'glhf')
+            this.chat_model = new glhf(chat.model, chat.url);
+        else if (chat.api === 'hyperbolic') {
+            this.chat_model = new hyperbolic(chat.model.replace('hyperbolic:', '').replace('hb:', ''), chat.url, max_tokens ? max_tokens : 8192);
+        }
         else if (chat.api === 'novita')
             this.chat_model = new Novita(chat.model.replace('novita/', ''), chat.url);
         else if (chat.api === 'qwen')
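One thing worth flagging in the hunk above: the hyperbolic class added in src/models/hyperbolic.js below only declares constructor(modelName, apiUrl), so the third argument (max_tokens ? max_tokens : 8192) is silently ignored; the wrapper instead hard-codes max_tokens: 8192 inside its request payload.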
@@ -235,7 +248,12 @@ export class Prompter {
         }
         let prompt = this.profile.conversing;
         prompt = await this.replaceStrings(prompt, messages, this.convo_examples);
+        console.log("DEBUG - promptConvo - agentName:", this.agent.name); // DEBUG
+        console.log("DEBUG - promptConvo - prompt:", prompt); // DEBUG: Inspect this prompt
+
+
         let generation = await this.chat_model.sendRequest(messages, prompt);
+
         // in conversations >2 players LLMs tend to hallucinate and role-play as other bots
         // the FROM OTHER BOT tag should never be generated by the LLM
         if (generation.includes('(FROM OTHER BOT)')) {
@@ -260,7 +278,13 @@ export class Prompter {
         await this.checkCooldown();
         let prompt = this.profile.coding;
         prompt = await this.replaceStrings(prompt, messages, this.coding_examples);
+        console.log("DEBUG - promptCoding - agentName:", this.agent.name); // DEBUG
+        console.log("DEBUG - promptCoding - prompt:", prompt); // DEBUG: Inspect this prompt
+
+
         let resp = await this.chat_model.sendRequest(messages, prompt);
+
+
         this.awaiting_coding = false;
         return resp;
     }
@@ -269,7 +293,14 @@ export class Prompter {
         await this.checkCooldown();
         let prompt = this.profile.saving_memory;
         prompt = await this.replaceStrings(prompt, null, null, to_summarize);
-        return await this.chat_model.sendRequest([], prompt);
+        console.log("DEBUG - promptMemSaving - agentName:", this.agent.name); // DEBUG
+        console.log("DEBUG - promptMemSaving - prompt:", prompt); // DEBUG: Inspect this prompt
+
+
+        const response = await this.chat_model.sendRequest([], prompt);
+
+
+        return response;
     }
 
     async promptShouldRespondToBot(new_message) {
@@ -289,9 +320,10 @@ export class Prompter {
         let user_message = 'Use the below info to determine what goal to target next\n\n';
         user_message += '$LAST_GOALS\n$STATS\n$INVENTORY\n$CONVO'
         user_message = await this.replaceStrings(user_message, messages, null, null, last_goals);
         let user_messages = [{role: 'user', content: user_message}];
+
         let res = await this.chat_model.sendRequest(user_messages, system_message);
 
 
         let goal = null;
         try {
@@ -307,4 +339,4 @@ export class Prompter {
         goal.quantity = parseInt(goal.quantity);
         return goal;
     }
 }
@@ -52,7 +52,27 @@ export class Gemini {
         console.log('Awaiting Google API response...');
         const result = await model.generateContent(prompt);
         const response = await result.response;
-        const text = response.text();
+        // got rid of the original method of const text = response.text to allow gemini thinking models to play minecraft :)
+        let text;
+        if (this.model_name && this.model_name.includes("thinking")) {
+            if (response.candidates && response.candidates.length > 0 && response.candidates[0].content && response.candidates[0].content.parts && response.candidates[0].content.parts.length > 1) {
+
+                text = response.candidates[0].content.parts[1].text;
+
+            } else {
+
+                console.warn("Unexpected response structure for thinking model:", response);
+                text = response.text();
+            }
+        } else {
+
+            text = response.text();
+
+        }
+
+
+
         console.log('Received.');
         if (!text.includes(stop_seq)) return text;
         const idx = text.indexOf(stop_seq);
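The new branch assumes a Gemini thinking model returns two parts per candidate, thoughts first and the answer second; a hedged sketch of the response shape this code expects (illustrative only, the two-part layout is this patch's assumption rather than a documented guarantee of the Google SDK):

    // illustrative shape assumed by the thinking-model branch above
    const assumedResponse = {
        candidates: [{
            content: {
                parts: [
                    { text: '...internal reasoning...' },  // parts[0]: the model's thoughts
                    { text: 'Final answer for the bot.' }  // parts[1]: what gets returned
                ]
            }
        }]
    };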
src/models/glhf.js (new file, 62 lines)
@@ -0,0 +1,62 @@
+import OpenAIApi from 'openai';
+import { getKey } from '../utils/keys.js';
+
+// glhf doesn't supply an SDK for their models, but fully supports OpenAI SDKs
+export class glhf {
+    constructor(model_name, url) {
+        this.model_name = model_name;
+
+        // Retrieve the API key from keys.json
+        const apiKey = getKey('GHLF_API_KEY');
+        if (!apiKey) {
+            throw new Error('API key not found. Please check keys.json and ensure GHLF_API_KEY is defined.');
+        }
+
+        // Configure OpenAIApi with the retrieved API key and base URL
+        this.openai = new OpenAIApi({
+            apiKey,
+            baseURL: url || "https://glhf.chat/api/openai/v1"
+        });
+    }
+
+    async sendRequest(turns, systemMessage, stop_seq = '***') {
+        // Construct the message array for the API request
+        let messages = [{ 'role': 'system', 'content': systemMessage }].concat(turns);
+
+        const pack = {
+            model: this.model_name || "hf:meta-llama/Llama-3.1-405B-Instruct",
+            messages,
+            stop: [stop_seq]
+        };
+
+        let res = null;
+        try {
+            console.log('Awaiting glhf.chat API response...');
+            // Uncomment the line below if you need to debug the messages
+            // console.log('Messages:', messages);
+
+            let completion = await this.openai.chat.completions.create(pack);
+            if (completion.choices[0].finish_reason === 'length') {
+                throw new Error('Context length exceeded');
+            }
+
+            console.log('Received.');
+            res = completion.choices[0].message.content;
+        } catch (err) {
+            if ((err.message === 'Context length exceeded' || err.code === 'context_length_exceeded') && turns.length > 1) {
+                console.log('Context length exceeded, trying again with shorter context.');
+                return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
+            } else {
+                console.log(err);
+                res = 'My brain disconnected, try again.';
+            }
+        }
+
+        // Replace special tokens in the response
+        return res.replace(/<\|separator\|>/g, '*no response*');
+    }
+
+    async embed(text) {
+        throw new Error('Embeddings are not supported by glhf.');
+    }
+}
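A hedged usage sketch of the new wrapper, assuming a valid GHLF_API_KEY in keys.json; the model string and message are illustrative:

    // illustrative usage of the glhf wrapper above (run as an ES module)
    import { glhf } from './src/models/glhf.js';

    const model = new glhf('hf:meta-llama/Llama-3.1-405B-Instruct');
    const reply = await model.sendRequest(
        [{ role: 'user', content: 'Say hello to the Minecraft server.' }],
        'You are a helpful Minecraft bot.'
    );
    console.log(reply);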
src/models/hyperbolic.js (new file, 99 lines)
@@ -0,0 +1,99 @@
+import { getKey } from '../utils/keys.js';
+
+/**
+ *
+ *
+ * Yes, this code was written by an Ai. It was written by GPT-o1 and tested :)
+ *
+ */
+
+export class hyperbolic {
+    constructor(modelName, apiUrl) {
+        this.modelName = modelName || "deepseek-ai/DeepSeek-V3";
+        this.apiUrl = apiUrl || "https://api.hyperbolic.xyz/v1/chat/completions";
+
+        // Retrieve the Hyperbolic API key from keys.js
+        this.apiKey = getKey('HYPERBOLIC_API_KEY');
+        if (!this.apiKey) {
+            throw new Error('HYPERBOLIC_API_KEY not found. Check your keys.js file.');
+        }
+    }
+
+    /**
+     * Sends a chat completion request to the Hyperbolic endpoint.
+     *
+     * @param {Array} turns - An array of message objects, e.g. [{role: 'user', content: 'Hi'}].
+     * @param {string} systemMessage - The system prompt or instruction.
+     * @param {string} stopSeq - A string that represents a stopping sequence, default '***'.
+     * @returns {Promise<string>} - The content of the model's reply.
+     */
+    async sendRequest(turns, systemMessage, stopSeq = '***') {
+        // Prepare the messages with a system prompt at the beginning
+        const messages = [{ role: 'system', content: systemMessage }, ...turns];
+
+        // Build the request payload (mirroring your original structure)
+        const payload = {
+            model: this.modelName,
+            messages: messages,
+            max_tokens: 8192,
+            temperature: 0.7,
+            top_p: 0.9,
+            stream: false
+        };
+
+        let completionContent = null;
+
+        try {
+            console.log('Awaiting Hyperbolic API response...');
+            console.log('Messages:', messages);
+
+            const response = await fetch(this.apiUrl, {
+                method: 'POST',
+                headers: {
+                    'Content-Type': 'application/json',
+                    'Authorization': `Bearer ${this.apiKey}`
+                },
+                body: JSON.stringify(payload)
+            });
+
+            if (!response.ok) {
+                throw new Error(`HTTP error! status: ${response.status}`);
+            }
+
+            const data = await response.json();
+            if (
+                data?.choices?.[0]?.finish_reason &&
+                data.choices[0].finish_reason === 'length'
+            ) {
+                throw new Error('Context length exceeded');
+            }
+
+            completionContent = data?.choices?.[0]?.message?.content || '';
+            console.log('Received response from Hyperbolic.');
+
+        } catch (err) {
+            if (
+                (err.message === 'Context length exceeded' ||
+                 err.code === 'context_length_exceeded') &&
+                turns.length > 1
+            ) {
+                console.log('Context length exceeded, trying again with a shorter context...');
+                // Remove the first user turn and try again (like the original code).
+                return await this.sendRequest(turns.slice(1), systemMessage, stopSeq);
+            } else {
+                console.log(err);
+                completionContent = 'My brain disconnected, try again.';
+            }
+        }
+
+        // Replace any special tokens from your original code if needed
+        return completionContent.replace(/<\|separator\|>/g, '*no response*');
+    }
+
+    /**
+     * Embeddings are not supported in your original snippet, so we mirror that error.
+     */
+    async embed(text) {
+        throw new Error('Embeddings are not supported by Hyperbolic.');
+    }
+}
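And the equivalent hedged usage sketch for the Hyperbolic wrapper, assuming HYPERBOLIC_API_KEY is set in keys.json. The constructor strips no prefixes itself; prompter.js removes hb:/hyperbolic: before instantiating:

    // illustrative usage of the hyperbolic wrapper above (run as an ES module)
    import { hyperbolic } from './src/models/hyperbolic.js';

    const model = new hyperbolic('deepseek-ai/DeepSeek-V3');
    const reply = await model.sendRequest(
        [{ role: 'user', content: 'Plan a short mining trip.' }],
        'You are a helpful Minecraft bot.'
    );
    console.log(reply);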